Diffstat (limited to 'vendor/github.com/hashicorp')
-rw-r--r--  vendor/github.com/hashicorp/errwrap/LICENSE  354
-rw-r--r--  vendor/github.com/hashicorp/errwrap/README.md  89
-rw-r--r--  vendor/github.com/hashicorp/errwrap/errwrap.go  169
-rw-r--r--  vendor/github.com/hashicorp/go-getter/LICENSE  354
-rw-r--r--  vendor/github.com/hashicorp/go-getter/README.md  253
-rw-r--r--  vendor/github.com/hashicorp/go-getter/client.go  335
-rw-r--r--  vendor/github.com/hashicorp/go-getter/client_mode.go  24
-rw-r--r--  vendor/github.com/hashicorp/go-getter/copy_dir.go  78
-rw-r--r--  vendor/github.com/hashicorp/go-getter/decompress.go  29
-rw-r--r--  vendor/github.com/hashicorp/go-getter/decompress_bzip2.go  45
-rw-r--r--  vendor/github.com/hashicorp/go-getter/decompress_gzip.go  49
-rw-r--r--  vendor/github.com/hashicorp/go-getter/decompress_tar.go  83
-rw-r--r--  vendor/github.com/hashicorp/go-getter/decompress_tbz2.go  33
-rw-r--r--  vendor/github.com/hashicorp/go-getter/decompress_testing.go  135
-rw-r--r--  vendor/github.com/hashicorp/go-getter/decompress_tgz.go  39
-rw-r--r--  vendor/github.com/hashicorp/go-getter/decompress_zip.go  96
-rw-r--r--  vendor/github.com/hashicorp/go-getter/detect.go  97
-rw-r--r--  vendor/github.com/hashicorp/go-getter/detect_bitbucket.go  66
-rw-r--r--  vendor/github.com/hashicorp/go-getter/detect_file.go  67
-rw-r--r--  vendor/github.com/hashicorp/go-getter/detect_github.go  73
-rw-r--r--  vendor/github.com/hashicorp/go-getter/detect_s3.go  61
-rw-r--r--  vendor/github.com/hashicorp/go-getter/folder_storage.go  65
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get.go  139
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get_file.go  32
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get_file_unix.go  103
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get_file_windows.go  120
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get_git.go  225
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get_hg.go  131
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get_http.go  227
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get_mock.go  52
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get_s3.go  243
-rw-r--r--  vendor/github.com/hashicorp/go-getter/helper/url/url.go  14
-rw-r--r--  vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go  11
-rw-r--r--  vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go  40
-rw-r--r--  vendor/github.com/hashicorp/go-getter/netrc.go  67
-rw-r--r--  vendor/github.com/hashicorp/go-getter/source.go  36
-rw-r--r--  vendor/github.com/hashicorp/go-getter/storage.go  13
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/LICENSE  353
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/README.md  97
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/append.go  41
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/flatten.go  26
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/format.go  27
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/multierror.go  51
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/prefix.go  37
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/LICENSE  353
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/README.md  160
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/client.go  666
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/discover.go  28
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/error.go  24
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/mux_broker.go  204
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/plugin.go  25
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/process.go  24
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/process_posix.go  19
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/process_windows.go  29
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/rpc_client.go  123
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/rpc_server.go  185
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/server.go  235
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/server_mux.go  31
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/stream.go  18
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/testing.go  76
-rw-r--r--  vendor/github.com/hashicorp/go-uuid/LICENSE  363
-rw-r--r--  vendor/github.com/hashicorp/go-uuid/README.md  8
-rw-r--r--  vendor/github.com/hashicorp/go-uuid/uuid.go  65
-rw-r--r--  vendor/github.com/hashicorp/go-version/LICENSE  354
-rw-r--r--  vendor/github.com/hashicorp/go-version/README.md  65
-rw-r--r--  vendor/github.com/hashicorp/go-version/constraint.go  178
-rw-r--r--  vendor/github.com/hashicorp/go-version/version.go  322
-rw-r--r--  vendor/github.com/hashicorp/go-version/version_collection.go  17
-rw-r--r--  vendor/github.com/hashicorp/hcl/LICENSE  354
-rw-r--r--  vendor/github.com/hashicorp/hcl/README.md  125
-rw-r--r--  vendor/github.com/hashicorp/hcl/decoder.go  724
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl.go  11
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/ast/ast.go  219
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/ast/walk.go  52
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/parser/error.go  17
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/parser/parser.go  514
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go  651
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go  241
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/token/position.go  46
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/token/token.go  219
-rw-r--r--  vendor/github.com/hashicorp/hcl/json/parser/flatten.go  117
-rw-r--r--  vendor/github.com/hashicorp/hcl/json/parser/parser.go  313
-rw-r--r--  vendor/github.com/hashicorp/hcl/json/scanner/scanner.go  451
-rw-r--r--  vendor/github.com/hashicorp/hcl/json/token/position.go  46
-rw-r--r--  vendor/github.com/hashicorp/hcl/json/token/token.go  118
-rw-r--r--  vendor/github.com/hashicorp/hcl/lex.go  38
-rw-r--r--  vendor/github.com/hashicorp/hcl/parse.go  39
-rw-r--r--  vendor/github.com/hashicorp/hil/LICENSE  353
-rw-r--r--  vendor/github.com/hashicorp/hil/README.md  102
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/arithmetic.go  43
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/arithmetic_op.go  24
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/ast.go  99
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/call.go  47
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/conditional.go  36
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/index.go  76
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/literal.go  88
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/output.go  78
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/scope.go  90
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/stack.go  25
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/type_string.go  54
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/unknown.go  30
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/variable_access.go  36
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/variables_helper.go  63
-rw-r--r--  vendor/github.com/hashicorp/hil/builtins.go  331
-rw-r--r--  vendor/github.com/hashicorp/hil/check_identifier.go  88
-rw-r--r--  vendor/github.com/hashicorp/hil/check_types.go  662
-rw-r--r--  vendor/github.com/hashicorp/hil/convert.go  159
-rw-r--r--  vendor/github.com/hashicorp/hil/eval.go  472
-rw-r--r--  vendor/github.com/hashicorp/hil/eval_type.go  16
-rw-r--r--  vendor/github.com/hashicorp/hil/evaltype_string.go  42
-rw-r--r--  vendor/github.com/hashicorp/hil/parse.go  29
-rw-r--r--  vendor/github.com/hashicorp/hil/parser/binary_op.go  45
-rw-r--r--  vendor/github.com/hashicorp/hil/parser/error.go  38
-rw-r--r--  vendor/github.com/hashicorp/hil/parser/fuzz.go  28
-rw-r--r--  vendor/github.com/hashicorp/hil/parser/parser.go  522
-rw-r--r--  vendor/github.com/hashicorp/hil/scanner/peeker.go  55
-rw-r--r--  vendor/github.com/hashicorp/hil/scanner/scanner.go  550
-rw-r--r--  vendor/github.com/hashicorp/hil/scanner/token.go  105
-rw-r--r--  vendor/github.com/hashicorp/hil/scanner/tokentype_string.go  51
-rw-r--r--  vendor/github.com/hashicorp/hil/transform_fixed.go  29
-rw-r--r--  vendor/github.com/hashicorp/hil/walk.go  266
-rw-r--r--  vendor/github.com/hashicorp/logutils/LICENSE  354
-rw-r--r--  vendor/github.com/hashicorp/logutils/README.md  36
-rw-r--r--  vendor/github.com/hashicorp/logutils/level.go  81
-rw-r--r--  vendor/github.com/hashicorp/terraform/LICENSE  354
-rw-r--r--  vendor/github.com/hashicorp/terraform/README.md  164
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/provider.go  239
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_config.go  308
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_disk.go  99
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_file.go  178
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_filesystem.go  122
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_group.go  57
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_networkd_unit.go  60
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_raid.go  69
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_systemd_unit.go  104
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_user.go  126
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/append.go  86
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/config.go  1096
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/config_string.go  338
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/config_terraform.go  117
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/config_tree.go  43
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/import_tree.go  113
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/interpolate.go  386
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go  1346
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/interpolate_walk.go  283
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/lang.go  11
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/loader.go  224
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/loader_hcl.go  1091
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/merge.go  193
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/copy_dir.go  114
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/get.go  71
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/inode.go  21
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go  21
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/inode_windows.go  8
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/module.go  7
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/testing.go  38
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/tree.go  428
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/tree_gob.go  57
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go  118
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/provisioner_enums.go  40
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/raw_config.go  335
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/resource_mode.go  9
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/resource_mode_string.go  16
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/testing.go  15
-rw-r--r--  vendor/github.com/hashicorp/terraform/dag/dag.go  286
-rw-r--r--  vendor/github.com/hashicorp/terraform/dag/dot.go  282
-rw-r--r--  vendor/github.com/hashicorp/terraform/dag/edge.go  37
-rw-r--r--  vendor/github.com/hashicorp/terraform/dag/graph.go  391
-rw-r--r--  vendor/github.com/hashicorp/terraform/dag/marshal.go  462
-rw-r--r--  vendor/github.com/hashicorp/terraform/dag/set.go  109
-rw-r--r--  vendor/github.com/hashicorp/terraform/dag/tarjan.go  107
-rw-r--r--  vendor/github.com/hashicorp/terraform/dag/walk.go  445
-rw-r--r--  vendor/github.com/hashicorp/terraform/flatmap/expand.go  147
-rw-r--r--  vendor/github.com/hashicorp/terraform/flatmap/flatten.go  71
-rw-r--r--  vendor/github.com/hashicorp/terraform/flatmap/map.go  82
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/README.md  7
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/config/decode.go  28
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/config/validator.go  214
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go  154
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/experiment/id.go  34
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go  22
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go  41
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/logging/logging.go  100
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/error.go  79
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/id.go  39
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/map.go  140
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/resource.go  49
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/state.go  259
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/testing.go  790
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go  160
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go  141
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/wait.go  84
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/README.md  11
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/backend.go  94
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go  59
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/equal.go  6
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go  334
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go  333
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go  208
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go  232
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go  63
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go  8
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go  319
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go  36
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/provider.go  400
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go  180
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource.go  478
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go  502
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go  17
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go  52
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go  237
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/schema.go  1537
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/serialize.go  122
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/set.go  209
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/testing.go  30
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go  21
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go  16
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/shadow/closer.go  80
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go  128
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go  151
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go  66
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/shadow/value.go  79
-rw-r--r--  vendor/github.com/hashicorp/terraform/plugin/plugin.go  13
-rw-r--r--  vendor/github.com/hashicorp/terraform/plugin/resource_provider.go  578
-rw-r--r--  vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go  173
-rw-r--r--  vendor/github.com/hashicorp/terraform/plugin/serve.go  54
-rw-r--r--  vendor/github.com/hashicorp/terraform/plugin/ui_input.go  51
-rw-r--r--  vendor/github.com/hashicorp/terraform/plugin/ui_output.go  29
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/context.go  1022
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/context_components.go  65
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go  32
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/context_import.go  77
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/debug.go  523
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/diff.go  866
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go  17
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval.go  63
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_apply.go  359
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go  38
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_context.go  84
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go  347
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go  208
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_count.go  58
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go  78
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go  25
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_diff.go  478
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_error.go  20
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_filter.go  25
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go  49
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_if.go  26
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go  76
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go  24
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_noop.go  8
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_output.go  119
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_provider.go  164
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go  47
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go  139
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go  55
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_resource.go  13
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go  27
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_state.go  324
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_validate.go  227
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go  74
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_variable.go  279
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go  119
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph.go  172
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder.go  77
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go  141
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go  67
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go  76
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go  27
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go  161
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go  132
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go  36
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_dot.go  9
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go  7
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_walk.go  60
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go  157
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go  18
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go  16
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/hook.go  137
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/hook_mock.go  245
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/hook_stop.go  87
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/instancetype.go  13
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go  16
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/interpolate.go  790
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go  14
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go  22
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go  198
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go  29
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go  125
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_output.go  76
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go  35
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_provider.go  11
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go  85
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go  38
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go  44
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go  240
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go  50
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go  357
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go  288
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go  83
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go  53
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go  190
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go  54
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go  100
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go  158
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go  22
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/path.go  24
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/plan.go  153
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource.go  360
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource_address.go  301
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource_provider.go  204
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go  297
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go  54
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go  72
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/semantics.go  132
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/shadow.go  28
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/shadow_components.go  273
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/shadow_context.go  158
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go  815
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go  282
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state.go  2118
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state_add.go  374
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state_filter.go  267
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go  189
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go  142
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state_v1.go  145
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/testing.go  19
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform.go  52
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go  80
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go  78
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go  68
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_config.go  135
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go  80
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go  23
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go  28
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go  168
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go  257
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go  269
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_diff.go  86
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_expand.go  48
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go  38
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go  241
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go  120
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go  110
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go  64
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go  78
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_output.go  59
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_provider.go  380
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go  50
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go  206
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_reference.go  321
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go  51
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_root.go  38
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_state.go  65
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_targets.go  144
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go  20
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_variable.go  40
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go  44
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_input.go  26
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go  23
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go  19
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_output.go  7
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go  9
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go  16
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go  15
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/util.go  93
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/variables.go  166
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/version.go  31
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/version_required.go  69
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go  16
-rw-r--r--  vendor/github.com/hashicorp/yamux/LICENSE  362
-rw-r--r--  vendor/github.com/hashicorp/yamux/README.md  86
-rw-r--r--  vendor/github.com/hashicorp/yamux/addr.go  60
-rw-r--r--  vendor/github.com/hashicorp/yamux/const.go  157
-rw-r--r--  vendor/github.com/hashicorp/yamux/mux.go  87
-rw-r--r--  vendor/github.com/hashicorp/yamux/session.go  623
-rw-r--r--  vendor/github.com/hashicorp/yamux/stream.go  457
-rw-r--r--  vendor/github.com/hashicorp/yamux/util.go  28
379 files changed, 60989 insertions, 0 deletions
diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE
new file mode 100644
index 00000000..c33dcc7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md
new file mode 100644
index 00000000..1c95f597
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/README.md
@@ -0,0 +1,89 @@
+# errwrap
+
+`errwrap` is a package for Go that formalizes the pattern of wrapping errors
+and checking if an error contains another error.
+
+There is a common pattern in Go of taking a returned `error` value and
+then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
+with this pattern is that you completely lose the original `error` structure.
+
+Arguably the _correct_ approach is that you should make a custom structure
+implementing the `error` interface, and have the original error as a field
+on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
+This is a good approach, but you have to know the entire chain of possible
+rewrapping that happens, when you might just care about one.
+
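+As a rough sketch of that approach, a custom type might look like this (the
+`QueryError` name and fields here are illustrative, not part of any package):
+
+```go
+// QueryError keeps the original error as a field, in the style of
+// os.PathError.
+type QueryError struct {
+	Query string
+	Err   error
+}
+
+func (e *QueryError) Error() string {
+	return "query " + e.Query + ": " + e.Err.Error()
+}
+```
+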
+`errwrap` formalizes this pattern (it doesn't matter what approach you use
+above) by giving a single interface for wrapping errors, checking if a specific
+error is wrapped, and extracting that error.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/errwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/errwrap
+
+## Usage
+
+#### Basic Usage
+
+Below is a very basic example of its usage:
+
+```go
+// A function that always returns an error, but wraps it, like a real
+// function might.
+func tryOpen() error {
+	_, err := os.Open("/i/dont/exist")
+	if err != nil {
+		return errwrap.Wrapf("Doesn't exist: {{err}}", err)
+	}
+
+	return nil
+}
+
+func main() {
+	err := tryOpen()
+
+	// We can use the Contains helpers to check if an error contains
+	// another error message. It is safe to do this with a nil error, or with
+	// an error that doesn't even use the errwrap package.
+	if errwrap.Contains(err, "does not exist") {
+		// Do something
+	}
+	if errwrap.ContainsType(err, new(os.PathError)) {
+		// Do something
+	}
+
+	// Or we can use the associated `Get` functions to just extract
+	// a specific error. This would return nil if that specific error doesn't
+	// exist.
+	perr := errwrap.GetType(err, new(os.PathError))
+	if perr != nil {
+		// Do something with the *os.PathError
+	}
+}
+```
+
+#### Custom Types
+
+If you're already making custom types that properly wrap errors, then
+you can get all the functionality of `errwrap.Contains` and such by
+implementing the `Wrapper` interface with just one function. Example:
+
+```go
+type AppError struct {
+	Code ErrorCode
+	Err  error
+}
+
+func (e *AppError) WrappedErrors() []error {
+	return []error{e.Err}
+}
+```
+
+Now this works:
+
+```go
+err := &AppError{Err: fmt.Errorf("an error")}
+if errwrap.ContainsType(err, fmt.Errorf("")) {
+	// This will work!
+}
+```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
new file mode 100644
index 00000000..a733bef1
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/errwrap.go
@@ -0,0 +1,169 @@
+// Package errwrap implements methods to formalize error wrapping in Go.
+//
+// All of the top-level functions that take an `error` are built to be able
+// to take any error, not just wrapped errors. This allows you to use errwrap
+// without having to type-check and type-cast everywhere.
+package errwrap
+
+import (
+	"errors"
+	"reflect"
+	"strings"
+)
+
+// WalkFunc is the callback called for Walk.
+type WalkFunc func(error)
+
+// Wrapper is an interface that can be implemented by custom types to
+// have all the Contains, Get, etc. functions in errwrap work.
+//
+// When Walk reaches a Wrapper, it will call the callback for every
+// wrapped error in addition to the wrapper itself. Since all the top-level
+// functions in errwrap use Walk, this means that all those functions work
+// with your custom type.
+type Wrapper interface {
+	WrappedErrors() []error
+}
+
+// Wrap defines that outer wraps inner, returning an error type that
+// can be cleanly used with the other methods in this package, such as
+// Contains, GetAll, etc.
+//
+// This function won't modify the error message at all (the outer message
+// will be used).
+func Wrap(outer, inner error) error {
+	return &wrappedError{
+		Outer: outer,
+		Inner: inner,
+	}
+}
+
+// Wrapf wraps an error with a formatting message. This is similar to using
+// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
+// errors, you should replace it with this.
+//
+// format is the format of the error message. The string '{{err}}' will
+// be replaced with the original error message.
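+//
+// For example (an illustrative call, not from the upstream docs):
+//
+//	err := Wrapf("failed to open config: {{err}}", os.ErrNotExist)
+//	// err.Error() == "failed to open config: file does not exist"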
+func Wrapf(format string, err error) error {
+	outerMsg := "<nil>"
+	if err != nil {
+		outerMsg = err.Error()
+	}
+
+	outer := errors.New(strings.Replace(
+		format, "{{err}}", outerMsg, -1))
+
+	return Wrap(outer, err)
+}
+
+// Contains checks if the given error contains an error with the
+// message msg. If err is not a wrapped error, this will always return
+// false unless the error itself happens to match this msg.
+func Contains(err error, msg string) bool {
+	return len(GetAll(err, msg)) > 0
+}
+
+// ContainsType checks if the given error contains an error with
+// the same concrete type as v. If err is not a wrapped error, this will
+// check the err itself.
+func ContainsType(err error, v interface{}) bool {
+	return len(GetAllType(err, v)) > 0
+}
+
+// Get is the same as GetAll but returns the deepest matching error.
+func Get(err error, msg string) error {
+	es := GetAll(err, msg)
+	if len(es) > 0 {
+		return es[len(es)-1]
+	}
+
+	return nil
+}
+
+// GetType is the same as GetAllType but returns the deepest matching error.
+func GetType(err error, v interface{}) error {
+	es := GetAllType(err, v)
+	if len(es) > 0 {
+		return es[len(es)-1]
+	}
+
+	return nil
+}
+
+// GetAll gets all the errors that might be wrapped in err with the
+// given message. The order of the errors is such that the outermost
+// matching error (the most recent wrap) is index zero, and so on.
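+//
+// For example (an illustrative snippet, assuming the standard errors package):
+//
+//	inner := errors.New("oops")
+//	err := Wrap(errors.New("oops"), inner) // outer and inner share a message
+//	all := GetAll(err, "oops")             // all[0] is the outer error, all[1] is inner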
+func GetAll(err error, msg string) []error {
+	var result []error
+
+	Walk(err, func(err error) {
+		if err.Error() == msg {
+			result = append(result, err)
+		}
+	})
+
+	return result
+}
+
+// GetAllType gets all the errors that are the same type as v.
+//
+// The order of the return value is the same as described in GetAll.
+func GetAllType(err error, v interface{}) []error {
+	var result []error
+
+	var search string
+	if v != nil {
+		search = reflect.TypeOf(v).String()
+	}
+	Walk(err, func(err error) {
+		var needle string
+		if err != nil {
+			needle = reflect.TypeOf(err).String()
+		}
+
+		if needle == search {
+			result = append(result, err)
+		}
+	})
+
+	return result
+}
+
+// Walk walks all the wrapped errors in err and calls the callback. If
+// err isn't a wrapped error, this will be called once for err. If err
+// is a wrapped error, the callback will be called for both the wrapper
+// that implements error as well as the wrapped error itself.
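+//
+// For example, this collects every message in the chain (an illustrative
+// snippet, not from the upstream docs):
+//
+//	var msgs []string
+//	Walk(err, func(e error) {
+//		msgs = append(msgs, e.Error())
+//	})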
+func Walk(err error, cb WalkFunc) {
+	if err == nil {
+		return
+	}
+
+	switch e := err.(type) {
+	case *wrappedError:
+		cb(e.Outer)
+		Walk(e.Inner, cb)
+	case Wrapper:
+		cb(err)
+
+		for _, err := range e.WrappedErrors() {
+			Walk(err, cb)
+		}
+	default:
+		cb(err)
+	}
+}
+
+// wrappedError is an implementation of error that has both the
+// outer and inner errors.
+type wrappedError struct {
+	Outer error
+	Inner error
+}
+
+func (w *wrappedError) Error() string {
+	return w.Outer.Error()
+}
+
+func (w *wrappedError) WrappedErrors() []error {
+	return []error{w.Outer, w.Inner}
+}
diff --git a/vendor/github.com/hashicorp/go-getter/LICENSE b/vendor/github.com/hashicorp/go-getter/LICENSE
new file mode 100644
index 00000000..c33dcc7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md
new file mode 100644
index 00000000..4a0b6a62
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/README.md
@@ -0,0 +1,253 @@
+# go-getter
+
+[![Build Status](http://img.shields.io/travis/hashicorp/go-getter.svg?style=flat-square)][travis]
+[![Build status](https://ci.appveyor.com/api/projects/status/ulq3qr43n62croyq/branch/master?svg=true)][appveyor]
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[travis]: http://travis-ci.org/hashicorp/go-getter
+[godocs]: http://godoc.org/github.com/hashicorp/go-getter
+[appveyor]: https://ci.appveyor.com/project/hashicorp/go-getter/branch/master
+
+go-getter is a library for Go (golang) for downloading files or directories
+from various sources using a URL as the primary form of input.
+
+The power of this library is its flexibility: it can download from a number
+of different sources (file paths, Git, HTTP, Mercurial, etc.) using a single
+string as input. This removes the burden of knowing how to download from each
+of these sources from the implementer.
+
+The concept of a _detector_ automatically turns invalid URLs into proper
+URLs. For example: "github.com/hashicorp/go-getter" would turn into a
+Git URL. Or "./foo" would turn into a file URL. These are extensible.
+
+This library is used by [Terraform](https://terraform.io) for
+downloading modules, [Otto](https://ottoproject.io) for dependencies and
+Appfile imports, and [Nomad](https://nomadproject.io) for downloading
+binaries.
+
+## Installation and Usage
+
+Package documentation can be found on
+[GoDoc](http://godoc.org/github.com/hashicorp/go-getter).
+
+Installation can be done with a normal `go get`:
+
+```
+$ go get github.com/hashicorp/go-getter
+```
+
+go-getter also has a command you can use to test URL strings:
+
+```
+$ go install github.com/hashicorp/go-getter/cmd/go-getter
+...
+
+$ go-getter github.com/foo/bar ./foo
+...
+```
+
+The command is useful for verifying URL structures.
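+
+For library use, the top-level helpers `Get`, `GetAny`, and `GetFile` are the
+simplest entry points. A minimal sketch (the destination path is illustrative):
+
+```
+package main
+
+import (
+	"log"
+
+	getter "github.com/hashicorp/go-getter"
+)
+
+func main() {
+	// The GitHub detector turns this shorthand into a proper Git URL.
+	if err := getter.Get("./go-getter-src", "github.com/hashicorp/go-getter"); err != nil {
+		log.Fatal(err)
+	}
+}
+```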
+
+## URL Format
+
+go-getter uses a single string URL as input to download from a variety of
+protocols. go-getter has various "tricks" with this URL to do certain things.
+This section documents the URL format.
+
+### Supported Protocols and Detectors
+
+**Protocols** are used to download files/directories using a specific
+mechanism. Example protocols are Git and HTTP.
+
+**Detectors** are used to transform a valid or invalid URL into another
+URL if it matches a certain pattern. Example: "github.com/user/repo" is
+automatically transformed into a fully valid Git URL. This allows go-getter
+to be very user friendly.
+
+go-getter supports the following protocols out of the box. Additional
+protocols can be added at runtime by implementing the `Getter` interface.
+
+ * Local files
+ * Git
+ * Mercurial
+ * HTTP
+ * Amazon S3
+
+In addition to the above protocols, go-getter has what are called "detectors."
+These take a URL and attempt to automatically choose the best protocol for
+it, which might even involve changing the protocol. The following detection
+is built-in by default:
+
+ * File paths such as "./foo" are automatically changed to absolute
+ file URLs.
+ * GitHub URLs, such as "github.com/mitchellh/vagrant" are automatically
+ changed to Git protocol over HTTP.
+ * BitBucket URLs, such as "bitbucket.org/mitchellh/vagrant" are automatically
+   changed to a Git or Mercurial protocol using the BitBucket API.
+
+### Forced Protocol
+
+In some cases, the protocol to use is ambiguous depending on the source
+URL. For example, "http://github.com/mitchellh/vagrant.git" could reference
+an HTTP URL or a Git URL. Forced protocol syntax is used to disambiguate this
+URL.
+
+A protocol can be forced by prefixing the URL with the protocol followed by
+a double colon. For example: `git::http://github.com/mitchellh/vagrant.git`
+would download the given HTTP URL using the Git protocol.
+
+Forced protocols will also override any detectors.
+
+In the absence of a forced protocol, detectors may still run on the URL and
+transform the protocol anyway. The above example would have used the Git
+protocol either way, since the Git detector would have recognized it as a
+GitHub URL.
+
+### Protocol-Specific Options
+
+Each protocol can support protocol-specific options to configure that
+protocol. For example, the `git` protocol supports specifying a `ref`
+query parameter that tells it what ref to checkout for that Git
+repository.
+
+The options are specified as query parameters on the URL (or URL-like string)
+given to go-getter. Using the Git example above, the URL below is a valid
+input to go-getter:
+
+ github.com/hashicorp/go-getter?ref=abcd1234
+
+The protocol-specific options are documented below the URL format
+section. But because they are part of the URL, we point them out here so
+you know they exist.
+
+### Checksumming
+
+For file downloads over any protocol, go-getter can automatically verify
+a checksum for you. Note that checksumming only works when downloading files,
+not directories, but it works for any protocol.
+
+To checksum a file, append a `checksum` query parameter to the URL.
+The parameter value should be in the format of `type:value`, where
+type is "md5", "sha1", "sha256", or "sha512". The "value" should be
+the actual checksum value. go-getter will parse out this query parameter
+automatically and use it to verify the checksum. An example URL
+is shown below:
+
+```
+./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21
+```
+
+The checksum query parameter is never sent to the backend protocol
+implementation. It is used at a higher level by go-getter itself.
+
+### Unarchiving
+
+go-getter will automatically unarchive files into a file or directory
+based on the extension of the file being requested (over any protocol).
+This works for both file and directory downloads.
+
+go-getter looks for an `archive` query parameter to specify the format of
+the archive. If this isn't specified, go-getter will use the extension of
+the path to see if it appears archived. Unarchiving can be explicitly
+disabled by setting the `archive` query parameter to `false`.
+
+The following archive formats are supported:
+
+ * `tar.gz` and `tgz`
+ * `tar.bz2` and `tbz2`
+ * `zip`
+ * `gz`
+ * `bz2`
+
+For example:
+
+```
+./foo.zip
+```
+
+This will automatically be inferred to be a ZIP file and will be extracted.
+You can also be explicit about the archive type:
+
+```
+./some/other/path?archive=zip
+```
+
+And finally, you can disable archiving completely:
+
+```
+./some/path?archive=false
+```
+
+You can combine unarchiving with the other features of go-getter such
+as checksumming. The special `archive` query parameter will be removed
+from the URL before going to the final protocol downloader.
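+
+For example, a URL combining both query parameters might look like this
+(the path and checksum value are illustrative):
+
+```
+./module?archive=tgz&checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21
+```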
+
+## Protocol-Specific Options
+
+This section documents the protocol-specific options that can be specified
+for go-getter. These options should be appended to the input as normal query
+parameters. Depending on the usage of go-getter, applications may provide
+alternate ways of inputting options. For example, [Nomad](https://www.nomadproject.io)
+provides a nice options block for specifying options rather than in the URL.
+
+### General (All Protocols)
+
+The options below are available to all protocols:
+
+ * `archive` - The archive format to use to unarchive this file, or "" (empty
+ string) to disable unarchiving. For more details, see the complete section
+ on archive support above.
+
+ * `checksum` - Checksum to verify the downloaded file or archive. See
+ the entire section on checksumming above for format and more details.
+
+### Local Files (`file`)
+
+None
+
+### Git (`git`)
+
+ * `ref` - The Git ref to checkout. This is a ref, so it can point to
+ a commit SHA, a branch name, etc. If it is a named ref such as a branch
+ name, go-getter will update it to the latest on each get.
+
+ * `sshkey` - An SSH private key to use during clones. The provided key must
+ be a base64-encoded string. For example, to generate a suitable `sshkey`
+ from a private key file on disk, you would run `base64 -w0 <file>`.
+
+ **Note**: Git 2.3+ is required to use this feature.
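+
+For example, pinning a repository to a specific ref (the ref value here is
+illustrative):
+
+```
+git::https://github.com/hashicorp/go-getter.git?ref=abcd1234
+```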
+
+### Mercurial (`hg`)
+
+ * `rev` - The Mercurial revision to checkout.
+
+### HTTP (`http`)
+
+None
+
+### S3 (`s3`)
+
+S3 takes various access configurations in the URL. Note that it will also
+read these from standard AWS environment variables if they're set. If
+the query parameters are present, these take priority.
+
+ * `aws_access_key_id` - AWS access key.
+ * `aws_access_key_secret` - AWS access key secret.
+ * `aws_access_token` - AWS access token if this is being used.
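+
+For example (with placeholder credential values):
+
+```
+s3::https://s3.amazonaws.com/bucket/foo?aws_access_key_id=KEYID&aws_access_key_secret=SECRETKEY
+```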
+
+#### Using IAM Instance Profiles with S3
+
+If you use go-getter and want to use an EC2 IAM Instance Profile to avoid
+using credentials, then just omit the parameters above; the profile, if
+available, will be used automatically.
+
+#### S3 Bucket Examples
+
+S3 has several addressing schemes used to reference your bucket. These are
+listed here: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
+
+Some examples for these addressing schemes:
+- s3::https://s3.amazonaws.com/bucket/foo
+- s3::https://s3-eu-west-1.amazonaws.com/bucket/foo
+- bucket.s3.amazonaws.com/foo
+- bucket.s3-eu-west-1.amazonaws.com/foo/bar
+
diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go
new file mode 100644
index 00000000..876812a0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/client.go
@@ -0,0 +1,335 @@
+package getter
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ urlhelper "github.com/hashicorp/go-getter/helper/url"
+)
+
+// Client is a client for downloading things.
+//
+// Top-level functions such as Get are shortcuts for interacting with a client.
+// Using a client directly allows more fine-grained control over how downloading
+// is done, as well as customizing the protocols supported.
+type Client struct {
+	// Src is the source URL to get.
+	Src string
+
+	// Dst is the path to save the downloaded thing as. If Dir is set to
+	// true, then this should be a directory. If the directory doesn't
+	// exist, it will be created for you.
+	Dst string
+
+	// Pwd is the working directory for detection. If this isn't set, some
+	// detection may fail. Client will not default pwd to the current
+	// working directory for security reasons.
+	Pwd string
+
+ // Mode is the method of download the client will use. See ClientMode
+ // for documentation.
+ Mode ClientMode
+
+ // Detectors is the list of detectors that are tried on the source.
+ // If this is nil, then the default Detectors will be used.
+ Detectors []Detector
+
+ // Decompressors is the map of decompressors supported by this client.
+ // If this is nil, then the default value is the Decompressors global.
+ Decompressors map[string]Decompressor
+
+ // Getters is the map of protocols supported by this client. If this
+ // is nil, then the default Getters variable will be used.
+ Getters map[string]Getter
+
+	// Dir, if true, tells the Client it is downloading a directory (versus
+	// a single file). This distinction is necessary since filenames and
+	// directory names follow the same format, so disambiguating them is
+	// impossible without knowing ahead of time.
+ //
+ // WARNING: deprecated. If Mode is set, that will take precedence.
+ Dir bool
+}
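+
+// An illustrative sketch of direct Client use (not part of this package;
+// the paths are examples only):
+//
+//	client := &Client{
+//		Src:  "github.com/hashicorp/go-getter",
+//		Dst:  "/tmp/go-getter",
+//		Pwd:  "/tmp",
+//		Mode: ClientModeDir,
+//	}
+//	err := client.Get()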
+
+// Get downloads the configured source to the destination.
+func (c *Client) Get() error {
+ // Store this locally since there are cases we swap this
+ mode := c.Mode
+ if mode == ClientModeInvalid {
+ if c.Dir {
+ mode = ClientModeDir
+ } else {
+ mode = ClientModeFile
+ }
+ }
+
+ // Default decompressor value
+ decompressors := c.Decompressors
+ if decompressors == nil {
+ decompressors = Decompressors
+ }
+
+ // Detect the URL. This is safe if it is already detected.
+ detectors := c.Detectors
+ if detectors == nil {
+ detectors = Detectors
+ }
+ src, err := Detect(c.Src, c.Pwd, detectors)
+ if err != nil {
+ return err
+ }
+
+ // Determine if we have a forced protocol, i.e. "git::http://..."
+ force, src := getForcedGetter(src)
+
+ // If there is a subdir component, then we download the root separately
+ // and then copy over the proper subdir.
+ var realDst string
+ dst := c.Dst
+ src, subDir := SourceDirSubdir(src)
+ if subDir != "" {
+ tmpDir, err := ioutil.TempDir("", "tf")
+ if err != nil {
+ return err
+ }
+ if err := os.RemoveAll(tmpDir); err != nil {
+ return err
+ }
+ defer os.RemoveAll(tmpDir)
+
+ realDst = dst
+ dst = tmpDir
+ }
+
+ u, err := urlhelper.Parse(src)
+ if err != nil {
+ return err
+ }
+ if force == "" {
+ force = u.Scheme
+ }
+
+ getters := c.Getters
+ if getters == nil {
+ getters = Getters
+ }
+
+ g, ok := getters[force]
+ if !ok {
+ return fmt.Errorf(
+ "download not supported for scheme '%s'", force)
+ }
+
+ // We have magic query parameters that we use to signal different features
+ q := u.Query()
+
+ // Determine if we have an archive type
+ archiveV := q.Get("archive")
+ if archiveV != "" {
+		// Delete the parameter since it is a magic parameter we don't
+ // want to pass on to the Getter
+ q.Del("archive")
+ u.RawQuery = q.Encode()
+
+ // If we can parse the value as a bool and it is false, then
+ // set the archive to "-" which should never map to a decompressor
+ if b, err := strconv.ParseBool(archiveV); err == nil && !b {
+ archiveV = "-"
+ }
+ }
+ if archiveV == "" {
+ // We don't appear to... but is it part of the filename?
+ matchingLen := 0
+		for k := range decompressors {
+ if strings.HasSuffix(u.Path, "."+k) && len(k) > matchingLen {
+ archiveV = k
+ matchingLen = len(k)
+ }
+ }
+ }
+
+ // If we have a decompressor, then we need to change the destination
+ // to download to a temporary path. We unarchive this into the final,
+ // real path.
+ var decompressDst string
+ var decompressDir bool
+ decompressor := decompressors[archiveV]
+ if decompressor != nil {
+ // Create a temporary directory to store our archive. We delete
+ // this at the end of everything.
+ td, err := ioutil.TempDir("", "getter")
+ if err != nil {
+ return fmt.Errorf(
+ "Error creating temporary directory for archive: %s", err)
+ }
+ defer os.RemoveAll(td)
+
+ // Swap the download directory to be our temporary path and
+ // store the old values.
+ decompressDst = dst
+ decompressDir = mode != ClientModeFile
+ dst = filepath.Join(td, "archive")
+ mode = ClientModeFile
+ }
+
+ // Determine if we have a checksum
+ var checksumHash hash.Hash
+ var checksumValue []byte
+ if v := q.Get("checksum"); v != "" {
+ // Delete the query parameter if we have it.
+ q.Del("checksum")
+ u.RawQuery = q.Encode()
+
+ // Determine the checksum hash type
+ checksumType := ""
+ idx := strings.Index(v, ":")
+ if idx > -1 {
+ checksumType = v[:idx]
+ }
+ switch checksumType {
+ case "md5":
+ checksumHash = md5.New()
+ case "sha1":
+ checksumHash = sha1.New()
+ case "sha256":
+ checksumHash = sha256.New()
+ case "sha512":
+ checksumHash = sha512.New()
+ default:
+ return fmt.Errorf(
+ "unsupported checksum type: %s", checksumType)
+ }
+
+ // Get the remainder of the value and parse it into bytes
+ b, err := hex.DecodeString(v[idx+1:])
+ if err != nil {
+ return fmt.Errorf("invalid checksum: %s", err)
+ }
+
+ // Set our value
+ checksumValue = b
+ }
+
+ if mode == ClientModeAny {
+ // Ask the getter which client mode to use
+ mode, err = g.ClientMode(u)
+ if err != nil {
+ return err
+ }
+
+ // Destination is the base name of the URL path in "any" mode when
+ // a file source is detected.
+ if mode == ClientModeFile {
+ dst = filepath.Join(dst, filepath.Base(u.Path))
+ }
+ }
+
+ // If we're not downloading a directory, then just download the file
+ // and return.
+ if mode == ClientModeFile {
+ err := g.GetFile(dst, u)
+ if err != nil {
+ return err
+ }
+
+ if checksumHash != nil {
+ if err := checksum(dst, checksumHash, checksumValue); err != nil {
+ return err
+ }
+ }
+
+ if decompressor != nil {
+ // We have a decompressor, so decompress the current destination
+ // into the final destination with the proper mode.
+ err := decompressor.Decompress(decompressDst, dst, decompressDir)
+ if err != nil {
+ return err
+ }
+
+ // Swap the information back
+ dst = decompressDst
+ if decompressDir {
+ mode = ClientModeAny
+ } else {
+ mode = ClientModeFile
+ }
+ }
+
+ // We check the dir value again because it can be switched back
+ // if we were unarchiving. If we're still only Get-ing a file, then
+ // we're done.
+ if mode == ClientModeFile {
+ return nil
+ }
+ }
+
+ // If we're at this point we're either downloading a directory or we've
+ // downloaded and unarchived a directory and we're just checking subdir.
+ // In the case we have a decompressor we don't Get because it was Get
+ // above.
+ if decompressor == nil {
+ // If we're getting a directory, then this is an error. You cannot
+ // checksum a directory. TODO: test
+ if checksumHash != nil {
+ return fmt.Errorf(
+ "checksum cannot be specified for directory download")
+ }
+
+ // We're downloading a directory, which might require a bit more work
+ // if we're specifying a subdir.
+ err := g.Get(dst, u)
+ if err != nil {
+ err = fmt.Errorf("error downloading '%s': %s", src, err)
+ return err
+ }
+ }
+
+ // If we have a subdir, copy that over
+ if subDir != "" {
+ if err := os.RemoveAll(realDst); err != nil {
+ return err
+ }
+ if err := os.MkdirAll(realDst, 0755); err != nil {
+ return err
+ }
+
+ return copyDir(realDst, filepath.Join(dst, subDir), false)
+ }
+
+ return nil
+}
+
+// checksum is a simple method to compute the checksum of a source file
+// and compare it to the given expected value.
+func checksum(source string, h hash.Hash, v []byte) error {
+ f, err := os.Open(source)
+ if err != nil {
+ return fmt.Errorf("Failed to open file for checksum: %s", err)
+ }
+ defer f.Close()
+
+ if _, err := io.Copy(h, f); err != nil {
+ return fmt.Errorf("Failed to hash: %s", err)
+ }
+
+ if actual := h.Sum(nil); !bytes.Equal(actual, v) {
+ return fmt.Errorf(
+ "Checksums did not match.\nExpected: %s\nGot: %s",
+ hex.EncodeToString(v),
+ hex.EncodeToString(actual))
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/client_mode.go b/vendor/github.com/hashicorp/go-getter/client_mode.go
new file mode 100644
index 00000000..7f02509a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/client_mode.go
@@ -0,0 +1,24 @@
+package getter
+
+// ClientMode is the mode that the client operates in.
+type ClientMode uint
+
+const (
+ ClientModeInvalid ClientMode = iota
+
+ // ClientModeAny downloads anything it can. In this mode, dst must
+ // be a directory. If src is a file, it is saved into the directory
+ // with the basename of the URL. If src is a directory or archive,
+ // it is unpacked directly into dst.
+ ClientModeAny
+
+ // ClientModeFile downloads a single file. In this mode, dst must
+ // be a file path (doesn't have to exist). src must point to a single
+ // file. It is saved as dst.
+ ClientModeFile
+
+ // ClientModeDir downloads a directory. In this mode, dst must be
+ // a directory path (doesn't have to exist). src must point to an
+ // archive or directory (such as in s3).
+ ClientModeDir
+)
diff --git a/vendor/github.com/hashicorp/go-getter/copy_dir.go b/vendor/github.com/hashicorp/go-getter/copy_dir.go
new file mode 100644
index 00000000..2f58e8ae
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/copy_dir.go
@@ -0,0 +1,78 @@
+package getter
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// copyDir copies the src directory contents into dst. Both directories
+// should already exist.
+//
+// If ignoreDot is set to true, then dot-prefixed files/folders are ignored.
+func copyDir(dst string, src string, ignoreDot bool) error {
+ src, err := filepath.EvalSymlinks(src)
+ if err != nil {
+ return err
+ }
+
+ walkFn := func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if path == src {
+ return nil
+ }
+
+ if ignoreDot && strings.HasPrefix(filepath.Base(path), ".") {
+ // Skip any dot files
+ if info.IsDir() {
+ return filepath.SkipDir
+ } else {
+ return nil
+ }
+ }
+
+ // The "path" has the src prefixed to it. We need to join our
+ // destination with the path without the src on it.
+ dstPath := filepath.Join(dst, path[len(src):])
+
+ // If we have a directory, make that subdirectory, then continue
+ // the walk.
+ if info.IsDir() {
+ if path == filepath.Join(src, dst) {
+ // dst is in src; don't walk it.
+ return nil
+ }
+
+ if err := os.MkdirAll(dstPath, 0755); err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ // If we have a file, copy the contents.
+ srcF, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ dstF, err := os.Create(dstPath)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ if _, err := io.Copy(dstF, srcF); err != nil {
+ return err
+ }
+
+ // Chmod it
+ return os.Chmod(dstPath, info.Mode())
+ }
+
+ return filepath.Walk(src, walkFn)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go
new file mode 100644
index 00000000..d18174cc
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress.go
@@ -0,0 +1,29 @@
+package getter
+
+// Decompressor defines the interface that must be implemented to add
+// support for decompressing a type.
+type Decompressor interface {
+ // Decompress should decompress src to dst. dir specifies whether dst
+ // is a directory or single file. src is guaranteed to be a single file
+ // that exists. dst is not guaranteed to exist already.
+ Decompress(dst, src string, dir bool) error
+}
+
+// Decompressors is the mapping of extension to the Decompressor implementation
+// that will decompress that extension/type.
+var Decompressors map[string]Decompressor
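+
+// An illustrative note (not part of this package): support for additional
+// formats can be added by registering an implementation, e.g.
+//
+//	Decompressors["txz"] = new(myTarXzDecompressor) // hypothetical type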
+
+func init() {
+ tbzDecompressor := new(TarBzip2Decompressor)
+ tgzDecompressor := new(TarGzipDecompressor)
+
+ Decompressors = map[string]Decompressor{
+ "bz2": new(Bzip2Decompressor),
+ "gz": new(GzipDecompressor),
+ "tar.bz2": tbzDecompressor,
+ "tar.gz": tgzDecompressor,
+ "tbz2": tbzDecompressor,
+ "tgz": tgzDecompressor,
+ "zip": new(ZipDecompressor),
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go
new file mode 100644
index 00000000..339f4cf7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go
@@ -0,0 +1,45 @@
+package getter
+
+import (
+ "compress/bzip2"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// Bzip2Decompressor is an implementation of Decompressor that can
+// decompress bz2 files.
+type Bzip2Decompressor struct{}
+
+func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool) error {
+ // Directory isn't supported at all
+ if dir {
+ return fmt.Errorf("bzip2-compressed files can only unarchive to a single file")
+ }
+
+	// Create the enclosing directory for the destination file
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ // File first
+ f, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Bzip2 compression is second
+ bzipR := bzip2.NewReader(f)
+
+ // Copy it out
+ dstF, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ _, err = io.Copy(dstF, bzipR)
+ return err
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
new file mode 100644
index 00000000..20010540
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
@@ -0,0 +1,49 @@
+package getter
+
+import (
+ "compress/gzip"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// GzipDecompressor is an implementation of Decompressor that can
+// decompress gzip files.
+type GzipDecompressor struct{}
+
+func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error {
+ // Directory isn't supported at all
+ if dir {
+ return fmt.Errorf("gzip-compressed files can only unarchive to a single file")
+ }
+
+	// Create the enclosing directory for the destination file
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ // File first
+ f, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // gzip compression is second
+ gzipR, err := gzip.NewReader(f)
+ if err != nil {
+ return err
+ }
+ defer gzipR.Close()
+
+ // Copy it out
+ dstF, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ _, err = io.Copy(dstF, gzipR)
+ return err
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tar.go b/vendor/github.com/hashicorp/go-getter/decompress_tar.go
new file mode 100644
index 00000000..61f60431
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tar.go
@@ -0,0 +1,83 @@
+package getter
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// untar is a shared helper for untarring an archive. The reader should provide
+// an uncompressed view of the tar archive.
+func untar(input io.Reader, dst, src string, dir bool) error {
+ tarR := tar.NewReader(input)
+ done := false
+ for {
+ hdr, err := tarR.Next()
+ if err == io.EOF {
+ if !done {
+ // Empty archive
+ return fmt.Errorf("empty archive: %s", src)
+ }
+
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ path := dst
+ if dir {
+ path = filepath.Join(path, hdr.Name)
+ }
+
+ if hdr.FileInfo().IsDir() {
+ if !dir {
+ return fmt.Errorf("expected a single file: %s", src)
+ }
+
+ // A directory, just make the directory and continue unarchiving...
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+
+ continue
+ } else {
+ // There is no ordering guarantee that a file in a directory is
+ // listed before the directory
+ dstPath := filepath.Dir(path)
+
+ // Check that the directory exists, otherwise create it
+ if _, err := os.Stat(dstPath); os.IsNotExist(err) {
+ if err := os.MkdirAll(dstPath, 0755); err != nil {
+ return err
+ }
+ }
+ }
+
+		// We have a file. If we've already written one in single-file
+		// mode, then it is an error.
+ if !dir && done {
+ return fmt.Errorf("expected a single file, got multiple: %s", src)
+ }
+
+		// Mark that we're done so a second file in single-file mode errors
+ done = true
+
+ // Open the file for writing
+ dstF, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(dstF, tarR)
+ dstF.Close()
+ if err != nil {
+ return err
+ }
+
+ // Chmod the file
+ if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil {
+ return err
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go
new file mode 100644
index 00000000..5391b5c8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go
@@ -0,0 +1,33 @@
+package getter
+
+import (
+ "compress/bzip2"
+ "os"
+ "path/filepath"
+)
+
+// TarBzip2Decompressor is an implementation of Decompressor that can
+// decompress tar.bz2 files.
+type TarBzip2Decompressor struct{}
+
+func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error {
+ // If we're going into a directory we should make that first
+ mkdir := dst
+ if !dir {
+ mkdir = filepath.Dir(dst)
+ }
+ if err := os.MkdirAll(mkdir, 0755); err != nil {
+ return err
+ }
+
+ // File first
+ f, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Bzip2 compression is second
+ bzipR := bzip2.NewReader(f)
+ return untar(bzipR, dst, src, dir)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go
new file mode 100644
index 00000000..82b8ab4f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_testing.go
@@ -0,0 +1,135 @@
+package getter
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+ "strings"
+
+ "github.com/mitchellh/go-testing-interface"
+)
+
+// TestDecompressCase is a single test case for testing decompressors
+type TestDecompressCase struct {
+ Input string // Input is the complete path to the input file
+ Dir bool // Dir is whether or not we're testing directory mode
+ Err bool // Err is whether we expect an error or not
+ DirList []string // DirList is the list of files for Dir mode
+ FileMD5 string // FileMD5 is the expected MD5 for a single file
+}
+
+// TestDecompressor is a helper function for testing generic decompressors.
+func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) {
+ for _, tc := range cases {
+ t.Logf("Testing: %s", tc.Input)
+
+ // Temporary dir to store stuff
+ td, err := ioutil.TempDir("", "getter")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+		// The destination is always a fresh "subdir/result" path that doesn't exist yet
+ dst := filepath.Join(td, "subdir", "result")
+
+ // We use a function so defers work
+ func() {
+ defer os.RemoveAll(td)
+
+ // Decompress
+ err := d.Decompress(dst, tc.Input, tc.Dir)
+ if (err != nil) != tc.Err {
+ t.Fatalf("err %s: %s", tc.Input, err)
+ }
+ if tc.Err {
+ return
+ }
+
+ // If it isn't a directory, then check for a single file
+ if !tc.Dir {
+ fi, err := os.Stat(dst)
+ if err != nil {
+ t.Fatalf("err %s: %s", tc.Input, err)
+ }
+ if fi.IsDir() {
+ t.Fatalf("err %s: expected file, got directory", tc.Input)
+ }
+ if tc.FileMD5 != "" {
+ actual := testMD5(t, dst)
+ expected := tc.FileMD5
+ if actual != expected {
+ t.Fatalf("err %s: expected MD5 %s, got %s", tc.Input, expected, actual)
+ }
+ }
+
+ return
+ }
+
+ // Convert expected for windows
+ expected := tc.DirList
+ if runtime.GOOS == "windows" {
+ for i, v := range expected {
+ expected[i] = strings.Replace(v, "/", "\\", -1)
+ }
+ }
+
+ // Directory, check for the correct contents
+ actual := testListDir(t, dst)
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad %s\n\n%#v\n\n%#v", tc.Input, actual, expected)
+ }
+ }()
+ }
+}
+
+func testListDir(t testing.T, path string) []string {
+ var result []string
+ err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ sub = strings.TrimPrefix(sub, path)
+ if sub == "" {
+ return nil
+ }
+ sub = sub[1:] // Trim the leading path sep.
+
+ // If it is a dir, add trailing sep
+ if info.IsDir() {
+ sub += string(os.PathSeparator)
+ }
+
+ result = append(result, sub)
+ return nil
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ sort.Strings(result)
+ return result
+}
+
+func testMD5(t testing.T, path string) string {
+ f, err := os.Open(path)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ defer f.Close()
+
+ h := md5.New()
+ _, err = io.Copy(h, f)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ result := h.Sum(nil)
+ return hex.EncodeToString(result)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go
new file mode 100644
index 00000000..65eb70dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go
@@ -0,0 +1,39 @@
+package getter
+
+import (
+ "compress/gzip"
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
+// TarGzipDecompressor is an implementation of Decompressor that can
+// decompress tar.gz (gzip-compressed tar) files.
+type TarGzipDecompressor struct{}
+
+func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error {
+ // If we're going into a directory we should make that first
+ mkdir := dst
+ if !dir {
+ mkdir = filepath.Dir(dst)
+ }
+ if err := os.MkdirAll(mkdir, 0755); err != nil {
+ return err
+ }
+
+ // File first
+ f, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Gzip compression is second
+ gzipR, err := gzip.NewReader(f)
+ if err != nil {
+ return fmt.Errorf("Error opening a gzip reader for %s: %s", src, err)
+ }
+ defer gzipR.Close()
+
+ return untar(gzipR, dst, src, dir)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
new file mode 100644
index 00000000..a065c076
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
@@ -0,0 +1,96 @@
+package getter
+
+import (
+ "archive/zip"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// ZipDecompressor is an implementation of Decompressor that can
+// decompress zip files.
+type ZipDecompressor struct{}
+
+func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error {
+ // If we're going into a directory we should make that first
+ mkdir := dst
+ if !dir {
+ mkdir = filepath.Dir(dst)
+ }
+ if err := os.MkdirAll(mkdir, 0755); err != nil {
+ return err
+ }
+
+ // Open the zip
+ zipR, err := zip.OpenReader(src)
+ if err != nil {
+ return err
+ }
+ defer zipR.Close()
+
+ // Check the zip integrity
+ if len(zipR.File) == 0 {
+ // Empty archive
+ return fmt.Errorf("empty archive: %s", src)
+ }
+ if !dir && len(zipR.File) > 1 {
+ return fmt.Errorf("expected a single file: %s", src)
+ }
+
+ // Go through and unarchive
+ for _, f := range zipR.File {
+ path := dst
+ if dir {
+ path = filepath.Join(path, f.Name)
+ }
+
+ if f.FileInfo().IsDir() {
+ if !dir {
+ return fmt.Errorf("expected a single file: %s", src)
+ }
+
+ // A directory, just make the directory and continue unarchiving...
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+
+ continue
+ }
+
+		// Create the enclosing directories if we must. ZIP files aren't
+		// required to contain entries for the directories, so this
+		// can happen.
+ if dir {
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ }
+
+ // Open the file for reading
+ srcF, err := f.Open()
+ if err != nil {
+ return err
+ }
+
+ // Open the file for writing
+ dstF, err := os.Create(path)
+ if err != nil {
+ srcF.Close()
+ return err
+ }
+ _, err = io.Copy(dstF, srcF)
+ srcF.Close()
+ dstF.Close()
+ if err != nil {
+ return err
+ }
+
+ // Chmod the file
+ if err := os.Chmod(path, f.Mode()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go
new file mode 100644
index 00000000..481b737c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect.go
@@ -0,0 +1,97 @@
+package getter
+
+import (
+ "fmt"
+ "path/filepath"
+
+ "github.com/hashicorp/go-getter/helper/url"
+)
+
+// Detector defines the interface that an invalid URL or a URL with a blank
+// scheme is passed through in order to determine if it is shorthand for
+// something else well-known.
+type Detector interface {
+ // Detect will detect whether the string matches a known pattern to
+ // turn it into a proper URL.
+ Detect(string, string) (string, bool, error)
+}
+
+// Detectors is the list of detectors that are tried on an invalid URL.
+// This is also the order they're tried (index 0 is first).
+var Detectors []Detector
+
+func init() {
+ Detectors = []Detector{
+ new(GitHubDetector),
+ new(BitBucketDetector),
+ new(S3Detector),
+ new(FileDetector),
+ }
+}
+
+// Detect turns a source string into another source string if it is
+// detected to be of a known pattern.
+//
+// The third parameter should be the list of detectors to use in the
+// order to try them. If you don't want to configure this, just use
+// the global Detectors variable.
+//
+// This is safe to be called with an already valid source string: Detect
+// will just return it.
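+//
+// An illustrative call (result per the built-in GitHubDetector):
+//
+//	src, err := Detect("github.com/hashicorp/go-getter", "", Detectors)
+//	// src == "git::https://github.com/hashicorp/go-getter.git"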
+func Detect(src string, pwd string, ds []Detector) (string, error) {
+ getForce, getSrc := getForcedGetter(src)
+
+ // Separate out the subdir if there is one, we don't pass that to detect
+ getSrc, subDir := SourceDirSubdir(getSrc)
+
+ u, err := url.Parse(getSrc)
+ if err == nil && u.Scheme != "" {
+ // Valid URL
+ return src, nil
+ }
+
+ for _, d := range ds {
+ result, ok, err := d.Detect(getSrc, pwd)
+ if err != nil {
+ return "", err
+ }
+ if !ok {
+ continue
+ }
+
+ var detectForce string
+ detectForce, result = getForcedGetter(result)
+ result, detectSubdir := SourceDirSubdir(result)
+
+ // If we have a subdir from the detection, then prepend it to our
+ // requested subdir.
+ if detectSubdir != "" {
+ if subDir != "" {
+ subDir = filepath.Join(detectSubdir, subDir)
+ } else {
+ subDir = detectSubdir
+ }
+ }
+ if subDir != "" {
+ u, err := url.Parse(result)
+ if err != nil {
+ return "", fmt.Errorf("Error parsing URL: %s", err)
+ }
+ u.Path += "//" + subDir
+ result = u.String()
+ }
+
+ // Preserve the forced getter if it exists. We try to use the
+ // original set force first, followed by any force set by the
+ // detector.
+ if getForce != "" {
+ result = fmt.Sprintf("%s::%s", getForce, result)
+ } else if detectForce != "" {
+ result = fmt.Sprintf("%s::%s", detectForce, result)
+ }
+
+ return result, nil
+ }
+
+ return "", fmt.Errorf("invalid source string: %s", src)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
new file mode 100644
index 00000000..a183a17d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
@@ -0,0 +1,66 @@
+package getter
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+// BitBucketDetector implements Detector to detect BitBucket URLs and turn
+// them into URLs that the Git or Hg Getter can understand.
+type BitBucketDetector struct{}
+
+func (d *BitBucketDetector) Detect(src, _ string) (string, bool, error) {
+ if len(src) == 0 {
+ return "", false, nil
+ }
+
+ if strings.HasPrefix(src, "bitbucket.org/") {
+ return d.detectHTTP(src)
+ }
+
+ return "", false, nil
+}
+
+func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) {
+ u, err := url.Parse("https://" + src)
+ if err != nil {
+ return "", true, fmt.Errorf("error parsing BitBucket URL: %s", err)
+ }
+
+ // We need to get info on this BitBucket repository to determine whether
+ // it is Git or Hg.
+ var info struct {
+ SCM string `json:"scm"`
+ }
+ infoUrl := "https://api.bitbucket.org/1.0/repositories" + u.Path
+ resp, err := http.Get(infoUrl)
+ if err != nil {
+ return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode == 403 {
+ // A private repo
+ return "", true, fmt.Errorf(
+ "shorthand BitBucket URL can't be used for private repos, " +
+ "please use a full URL")
+ }
+ dec := json.NewDecoder(resp.Body)
+ if err := dec.Decode(&info); err != nil {
+ return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
+ }
+
+ switch info.SCM {
+ case "git":
+ if !strings.HasSuffix(u.Path, ".git") {
+ u.Path += ".git"
+ }
+
+ return "git::" + u.String(), true, nil
+ case "hg":
+ return "hg::" + u.String(), true, nil
+ default:
+ return "", true, fmt.Errorf("unknown BitBucket SCM type: %s", info.SCM)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_file.go b/vendor/github.com/hashicorp/go-getter/detect_file.go
new file mode 100644
index 00000000..756ea43f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_file.go
@@ -0,0 +1,67 @@
+package getter
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+// FileDetector implements Detector to detect file paths.
+type FileDetector struct{}
+
+func (d *FileDetector) Detect(src, pwd string) (string, bool, error) {
+ if len(src) == 0 {
+ return "", false, nil
+ }
+
+ if !filepath.IsAbs(src) {
+ if pwd == "" {
+ return "", true, fmt.Errorf(
+ "relative paths require a module with a pwd")
+ }
+
+		// Stat the pwd to determine if it's a symbolic link. If it is,
+ // then the pwd becomes the original directory. Otherwise,
+ // `filepath.Join` below does some weird stuff.
+ //
+ // We just ignore if the pwd doesn't exist. That error will be
+ // caught later when we try to use the URL.
+ if fi, err := os.Lstat(pwd); !os.IsNotExist(err) {
+ if err != nil {
+ return "", true, err
+ }
+ if fi.Mode()&os.ModeSymlink != 0 {
+ pwd, err = os.Readlink(pwd)
+ if err != nil {
+ return "", true, err
+ }
+
+ // The symlink itself might be a relative path, so we have to
+ // resolve this to have a correctly rooted URL.
+ pwd, err = filepath.Abs(pwd)
+ if err != nil {
+ return "", true, err
+ }
+ }
+ }
+
+ src = filepath.Join(pwd, src)
+ }
+
+ return fmtFileURL(src), true, nil
+}
+
+func fmtFileURL(path string) string {
+ if runtime.GOOS == "windows" {
+ // Make sure we're using "/" on Windows. URLs are "/"-based.
+ path = filepath.ToSlash(path)
+ return fmt.Sprintf("file://%s", path)
+ }
+
+ // Make sure that we don't start with "/" since we add that below.
+ if path[0] == '/' {
+ path = path[1:]
+ }
+ return fmt.Sprintf("file:///%s", path)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_github.go b/vendor/github.com/hashicorp/go-getter/detect_github.go
new file mode 100644
index 00000000..c084ad9a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_github.go
@@ -0,0 +1,73 @@
+package getter
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+// GitHubDetector implements Detector to detect GitHub URLs and turn
+// them into URLs that the Git Getter can understand.
+type GitHubDetector struct{}
+
+func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) {
+ if len(src) == 0 {
+ return "", false, nil
+ }
+
+ if strings.HasPrefix(src, "github.com/") {
+ return d.detectHTTP(src)
+ } else if strings.HasPrefix(src, "git@github.com:") {
+ return d.detectSSH(src)
+ }
+
+ return "", false, nil
+}
+
+func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) {
+ parts := strings.Split(src, "/")
+ if len(parts) < 3 {
+ return "", false, fmt.Errorf(
+ "GitHub URLs should be github.com/username/repo")
+ }
+
+ urlStr := fmt.Sprintf("https://%s", strings.Join(parts[:3], "/"))
+ url, err := url.Parse(urlStr)
+ if err != nil {
+ return "", true, fmt.Errorf("error parsing GitHub URL: %s", err)
+ }
+
+ if !strings.HasSuffix(url.Path, ".git") {
+ url.Path += ".git"
+ }
+
+ if len(parts) > 3 {
+ url.Path += "//" + strings.Join(parts[3:], "/")
+ }
+
+ return "git::" + url.String(), true, nil
+}
+
+func (d *GitHubDetector) detectSSH(src string) (string, bool, error) {
+ idx := strings.Index(src, ":")
+ qidx := strings.Index(src, "?")
+ if qidx == -1 {
+ qidx = len(src)
+ }
+
+ var u url.URL
+ u.Scheme = "ssh"
+ u.User = url.User("git")
+ u.Host = "github.com"
+ u.Path = src[idx+1 : qidx]
+ if qidx < len(src) {
+ q, err := url.ParseQuery(src[qidx+1:])
+ if err != nil {
+ return "", true, fmt.Errorf("error parsing GitHub SSH URL: %s", err)
+ }
+
+ u.RawQuery = q.Encode()
+ }
+
+ return "git::" + u.String(), true, nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_s3.go b/vendor/github.com/hashicorp/go-getter/detect_s3.go
new file mode 100644
index 00000000..8e0f4a03
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_s3.go
@@ -0,0 +1,61 @@
+package getter
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+// S3Detector implements Detector to detect S3 URLs and turn
+// them into URLs that the S3 getter can understand.
+type S3Detector struct{}
+
+func (d *S3Detector) Detect(src, _ string) (string, bool, error) {
+ if len(src) == 0 {
+ return "", false, nil
+ }
+
+ if strings.Contains(src, ".amazonaws.com/") {
+ return d.detectHTTP(src)
+ }
+
+ return "", false, nil
+}
+
+func (d *S3Detector) detectHTTP(src string) (string, bool, error) {
+ parts := strings.Split(src, "/")
+ if len(parts) < 2 {
+ return "", false, fmt.Errorf(
+ "URL is not a valid S3 URL")
+ }
+
+ hostParts := strings.Split(parts[0], ".")
+ if len(hostParts) == 3 {
+ return d.detectPathStyle(hostParts[0], parts[1:])
+ } else if len(hostParts) == 4 {
+ return d.detectVhostStyle(hostParts[1], hostParts[0], parts[1:])
+ } else {
+ return "", false, fmt.Errorf(
+ "URL is not a valid S3 URL")
+ }
+}
+
+func (d *S3Detector) detectPathStyle(region string, parts []string) (string, bool, error) {
+ urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s", region, strings.Join(parts, "/"))
+ url, err := url.Parse(urlStr)
+ if err != nil {
+ return "", false, fmt.Errorf("error parsing S3 URL: %s", err)
+ }
+
+ return "s3::" + url.String(), true, nil
+}
+
+func (d *S3Detector) detectVhostStyle(region, bucket string, parts []string) (string, bool, error) {
+ urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s/%s", region, bucket, strings.Join(parts, "/"))
+ url, err := url.Parse(urlStr)
+ if err != nil {
+ return "", false, fmt.Errorf("error parsing S3 URL: %s", err)
+ }
+
+ return "s3::" + url.String(), true, nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/folder_storage.go b/vendor/github.com/hashicorp/go-getter/folder_storage.go
new file mode 100644
index 00000000..647ccf45
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/folder_storage.go
@@ -0,0 +1,65 @@
+package getter
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
+// FolderStorage is an implementation of the Storage interface that manages
+// modules on the disk.
+type FolderStorage struct {
+ // StorageDir is the directory where the modules will be stored.
+ StorageDir string
+}
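+
+// An illustrative sketch of use (not part of this package; the path and key
+// are examples only):
+//
+//	s := &FolderStorage{StorageDir: "/tmp/modules"}
+//	if err := s.Get("my-key", "github.com/hashicorp/go-getter", false); err != nil {
+//		// handle error
+//	}
+//	dir, found, err := s.Dir("my-key")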
+
+// Dir implements Storage.Dir
+func (s *FolderStorage) Dir(key string) (d string, e bool, err error) {
+ d = s.dir(key)
+ _, err = os.Stat(d)
+ if err == nil {
+ // Directory exists
+ e = true
+ return
+ }
+ if os.IsNotExist(err) {
+ // Directory doesn't exist
+ d = ""
+ e = false
+ err = nil
+ return
+ }
+
+ // An error
+ d = ""
+ e = false
+ return
+}
+
+// Get implements Storage.Get
+func (s *FolderStorage) Get(key string, source string, update bool) error {
+ dir := s.dir(key)
+ if !update {
+ if _, err := os.Stat(dir); err == nil {
+ // If the directory already exists, then we're done since
+ // we're not updating.
+ return nil
+ } else if !os.IsNotExist(err) {
+ // If the error we got wasn't a file-not-exist error, then
+ // something went wrong and we should report it.
+ return fmt.Errorf("Error reading module directory: %s", err)
+ }
+ }
+
+ // Get the source. This always forces an update.
+ return Get(dir, source)
+}
+
+// dir returns the directory name that we'll use internally to map the
+// given key to a storage path.
+func (s *FolderStorage) dir(key string) string {
+ sum := md5.Sum([]byte(key))
+ return filepath.Join(s.StorageDir, hex.EncodeToString(sum[:]))
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go
new file mode 100644
index 00000000..c3236f55
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get.go
@@ -0,0 +1,139 @@
+// getter is a package for downloading files or directories from a variety of
+// protocols.
+//
+// getter is unique in its ability to download both directories and files.
+// It also detects certain source strings and turns them into
+// protocol-specific URLs. For example, "github.com/hashicorp/go-getter"
+// would turn into a Git URL and use the Git protocol.
+//
+// Protocols and detectors are extensible.
+//
+// To get started, see Client.
+package getter
+
+import (
+ "bytes"
+ "fmt"
+ "net/url"
+ "os/exec"
+ "regexp"
+ "syscall"
+)
+
+// Getter defines the interface that schemes must implement to download
+// things.
+type Getter interface {
+ // Get downloads the given URL into the given directory. This always
+ // assumes that we're updating and gets the latest version that it can.
+ //
+ // The directory may already exist (if we're updating). If it is in a
+ // format that isn't understood, an error should be returned. Get shouldn't
+ // simply nuke the directory.
+ Get(string, *url.URL) error
+
+	// GetFile downloads the given URL into the given path. The URL must
+ // reference a single file. If possible, the Getter should check if
+ // the remote end contains the same file and no-op this operation.
+ GetFile(string, *url.URL) error
+
+ // ClientMode returns the mode based on the given URL. This is used to
+ // allow clients to let the getters decide which mode to use.
+ ClientMode(*url.URL) (ClientMode, error)
+}
+
+// Getters is the mapping of scheme to the Getter implementation that will
+// be used to get a dependency.
+var Getters map[string]Getter
+
+// forcedRegexp is the regular expression that finds forced getters. This
+// syntax is scheme::url, example: git::https://foo.com
+var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`)
+
+func init() {
+ httpGetter := &HttpGetter{Netrc: true}
+
+ Getters = map[string]Getter{
+ "file": new(FileGetter),
+ "git": new(GitGetter),
+ "hg": new(HgGetter),
+ "s3": new(S3Getter),
+ "http": httpGetter,
+ "https": httpGetter,
+ }
+}
+
+// Get downloads the directory specified by src into the folder specified by
+// dst. If dst already exists, Get will attempt to update it.
+//
+// src is a URL, whereas dst is always just a file path to a folder. This
+// folder doesn't need to exist; it will be created if necessary.
+func Get(dst, src string) error {
+ return (&Client{
+ Src: src,
+ Dst: dst,
+ Dir: true,
+ Getters: Getters,
+ }).Get()
+}
+
+// GetAny downloads a URL into the given destination. Unlike Get or
+// GetFile, both directories and files are supported.
+//
+// dst must be a directory. If src is a file, it will be downloaded
+// into dst with the basename of the URL. If src is a directory or
+// archive, it will be unpacked directly into dst.
+func GetAny(dst, src string) error {
+ return (&Client{
+ Src: src,
+ Dst: dst,
+ Mode: ClientModeAny,
+ Getters: Getters,
+ }).Get()
+}
+
+// GetFile downloads the file specified by src into the path specified by
+// dst.
+func GetFile(dst, src string) error {
+ return (&Client{
+ Src: src,
+ Dst: dst,
+ Dir: false,
+ Getters: Getters,
+ }).Get()
+}
+
+// getRunCommand is a helper that will run a command and capture its output
+// in case an error happens.
+func getRunCommand(cmd *exec.Cmd) error {
+ var buf bytes.Buffer
+ cmd.Stdout = &buf
+ cmd.Stderr = &buf
+ err := cmd.Run()
+ if err == nil {
+ return nil
+ }
+ if exiterr, ok := err.(*exec.ExitError); ok {
+ // The program has exited with an exit code != 0
+ if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+ return fmt.Errorf(
+ "%s exited with %d: %s",
+ cmd.Path,
+ status.ExitStatus(),
+ buf.String())
+ }
+ }
+
+ return fmt.Errorf("error running %s: %s", cmd.Path, buf.String())
+}
+
+// getForcedGetter takes a source and returns the tuple of the forced
+// getter and the raw URL (without the force syntax).
+func getForcedGetter(src string) (string, string) {
+ var forced string
+ if ms := forcedRegexp.FindStringSubmatch(src); ms != nil {
+ forced = ms[1]
+ src = ms[2]
+ }
+
+ return forced, src
+}
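
A hedged usage sketch of the package-level helpers and the force syntax matched by `forcedRegexp`; the URLs are made up:

```go
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// The "git::" prefix matches forcedRegexp above: it forces the git
	// getter regardless of what detection would have chosen, and is
	// stripped from the URL before the getter runs.
	if err := getter.Get("./repo", "git::https://example.com/repo.git"); err != nil {
		log.Fatal(err)
	}

	// GetFile downloads a single file rather than a directory.
	if err := getter.GetFile("./file.txt", "https://example.com/file.txt"); err != nil {
		log.Fatal(err)
	}
}
```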
diff --git a/vendor/github.com/hashicorp/go-getter/get_file.go b/vendor/github.com/hashicorp/go-getter/get_file.go
new file mode 100644
index 00000000..e5d2d61d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_file.go
@@ -0,0 +1,32 @@
+package getter
+
+import (
+ "net/url"
+ "os"
+)
+
+// FileGetter is a Getter implementation that will download a module from
+// a file scheme.
+type FileGetter struct {
+ // Copy, if set to true, will copy data instead of using a symlink
+ Copy bool
+}
+
+func (g *FileGetter) ClientMode(u *url.URL) (ClientMode, error) {
+ path := u.Path
+ if u.RawPath != "" {
+ path = u.RawPath
+ }
+
+ fi, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+
+ // Check if the source is a directory.
+ if fi.IsDir() {
+ return ClientModeDir, nil
+ }
+
+ return ClientModeFile, nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_unix.go b/vendor/github.com/hashicorp/go-getter/get_file_unix.go
new file mode 100644
index 00000000..c89a2d5a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_file_unix.go
@@ -0,0 +1,103 @@
+// +build !windows
+
+package getter
+
+import (
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "path/filepath"
+)
+
+func (g *FileGetter) Get(dst string, u *url.URL) error {
+ path := u.Path
+ if u.RawPath != "" {
+ path = u.RawPath
+ }
+
+ // The source path must exist and be a directory to be usable.
+ if fi, err := os.Stat(path); err != nil {
+ return fmt.Errorf("source path error: %s", err)
+ } else if !fi.IsDir() {
+ return fmt.Errorf("source path must be a directory")
+ }
+
+ fi, err := os.Lstat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ // If the destination already exists, it must be a symlink
+ if err == nil {
+ mode := fi.Mode()
+ if mode&os.ModeSymlink == 0 {
+ return fmt.Errorf("destination exists and is not a symlink")
+ }
+
+ // Remove the destination
+ if err := os.Remove(dst); err != nil {
+ return err
+ }
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ return os.Symlink(path, dst)
+}
+
+func (g *FileGetter) GetFile(dst string, u *url.URL) error {
+ path := u.Path
+ if u.RawPath != "" {
+ path = u.RawPath
+ }
+
+ // The source path must exist and be a file to be usable.
+ if fi, err := os.Stat(path); err != nil {
+ return fmt.Errorf("source path error: %s", err)
+ } else if fi.IsDir() {
+ return fmt.Errorf("source path must be a file")
+ }
+
+ _, err := os.Lstat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+	// If the destination already exists, remove it: unlike Get, GetFile
+	// replaces any existing file rather than requiring a symlink
+	if err == nil {
+ if err := os.Remove(dst); err != nil {
+ return err
+ }
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ // If we're not copying, just symlink and we're done
+ if !g.Copy {
+ return os.Symlink(path, dst)
+ }
+
+ // Copy
+ srcF, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ dstF, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ _, err = io.Copy(dstF, srcF)
+ return err
+}
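
A small sketch of the `Copy` switch (hypothetical source and destination paths): with `Copy` unset, `GetFile` symlinks; with it set, the bytes are copied.

```go
package main

import (
	"log"
	"net/url"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	u, err := url.Parse("file:///etc/hostname")
	if err != nil {
		log.Fatal(err)
	}

	// Copy unset: the destination becomes a symlink to the source.
	fg := new(getter.FileGetter)
	if err := fg.GetFile("/tmp/hostname-link", u); err != nil {
		log.Fatal(err)
	}

	// Copy set: the file's bytes are copied instead.
	fgCopy := &getter.FileGetter{Copy: true}
	if err := fgCopy.GetFile("/tmp/hostname-copy", u); err != nil {
		log.Fatal(err)
	}
}
```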
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_windows.go b/vendor/github.com/hashicorp/go-getter/get_file_windows.go
new file mode 100644
index 00000000..f87ed0a0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_file_windows.go
@@ -0,0 +1,120 @@
+// +build windows
+
+package getter
+
+import (
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+)
+
+func (g *FileGetter) Get(dst string, u *url.URL) error {
+ path := u.Path
+ if u.RawPath != "" {
+ path = u.RawPath
+ }
+
+ // The source path must exist and be a directory to be usable.
+ if fi, err := os.Stat(path); err != nil {
+ return fmt.Errorf("source path error: %s", err)
+ } else if !fi.IsDir() {
+ return fmt.Errorf("source path must be a directory")
+ }
+
+ fi, err := os.Lstat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ // If the destination already exists, it must be a symlink
+ if err == nil {
+ mode := fi.Mode()
+ if mode&os.ModeSymlink == 0 {
+ return fmt.Errorf("destination exists and is not a symlink")
+ }
+
+ // Remove the destination
+ if err := os.Remove(dst); err != nil {
+ return err
+ }
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ sourcePath := toBackslash(path)
+
+ // Use mklink to create a junction point
+ output, err := exec.Command("cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("failed to run mklink %v %v: %v %q", dst, sourcePath, err, output)
+ }
+
+ return nil
+}
+
+func (g *FileGetter) GetFile(dst string, u *url.URL) error {
+ path := u.Path
+ if u.RawPath != "" {
+ path = u.RawPath
+ }
+
+	// The source path must exist and be a file to be usable.
+ if fi, err := os.Stat(path); err != nil {
+ return fmt.Errorf("source path error: %s", err)
+ } else if fi.IsDir() {
+ return fmt.Errorf("source path must be a file")
+ }
+
+ _, err := os.Lstat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+	// If the destination already exists, remove it: unlike Get, GetFile
+	// replaces any existing file rather than requiring a symlink
+	if err == nil {
+ if err := os.Remove(dst); err != nil {
+ return err
+ }
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ // If we're not copying, just symlink and we're done
+ if !g.Copy {
+ return os.Symlink(path, dst)
+ }
+
+ // Copy
+ srcF, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ dstF, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ _, err = io.Copy(dstF, srcF)
+ return err
+}
+
+// toBackslash returns the result of replacing each slash character
+// in path with a backslash ('\') character. Multiple separators are
+// replaced by multiple backslashes.
+func toBackslash(path string) string {
+ return strings.Replace(path, "/", "\\", -1)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go
new file mode 100644
index 00000000..07281398
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_git.go
@@ -0,0 +1,225 @@
+package getter
+
+import (
+ "encoding/base64"
+ "fmt"
+ "io/ioutil"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ urlhelper "github.com/hashicorp/go-getter/helper/url"
+ "github.com/hashicorp/go-version"
+)
+
+// GitGetter is a Getter implementation that will download a module from
+// a git repository.
+type GitGetter struct{}
+
+func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) {
+ return ClientModeDir, nil
+}
+
+func (g *GitGetter) Get(dst string, u *url.URL) error {
+ if _, err := exec.LookPath("git"); err != nil {
+ return fmt.Errorf("git must be available and on the PATH")
+ }
+
+ // Extract some query parameters we use
+ var ref, sshKey string
+ q := u.Query()
+ if len(q) > 0 {
+ ref = q.Get("ref")
+ q.Del("ref")
+
+ sshKey = q.Get("sshkey")
+ q.Del("sshkey")
+
+ // Copy the URL
+ var newU url.URL = *u
+ u = &newU
+ u.RawQuery = q.Encode()
+ }
+
+ var sshKeyFile string
+ if sshKey != "" {
+ // Check that the git version is sufficiently new.
+ if err := checkGitVersion("2.3"); err != nil {
+ return fmt.Errorf("Error using ssh key: %v", err)
+ }
+
+ // We have an SSH key - decode it.
+ raw, err := base64.StdEncoding.DecodeString(sshKey)
+ if err != nil {
+ return err
+ }
+
+ // Create a temp file for the key and ensure it is removed.
+ fh, err := ioutil.TempFile("", "go-getter")
+ if err != nil {
+ return err
+ }
+ sshKeyFile = fh.Name()
+ defer os.Remove(sshKeyFile)
+
+ // Set the permissions prior to writing the key material.
+ if err := os.Chmod(sshKeyFile, 0600); err != nil {
+ return err
+ }
+
+ // Write the raw key into the temp file.
+ _, err = fh.Write(raw)
+ fh.Close()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Clone or update the repository
+ _, err := os.Stat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err == nil {
+ err = g.update(dst, sshKeyFile, ref)
+ } else {
+ err = g.clone(dst, sshKeyFile, u)
+ }
+ if err != nil {
+ return err
+ }
+
+	// Next: check out the proper tag/branch if one is specified
+ if ref != "" {
+ if err := g.checkout(dst, ref); err != nil {
+ return err
+ }
+ }
+
+ // Lastly, download any/all submodules.
+ return g.fetchSubmodules(dst, sshKeyFile)
+}
+
+// GetFile for Git doesn't support updating at this time. It will download
+// the file every time.
+func (g *GitGetter) GetFile(dst string, u *url.URL) error {
+ td, err := ioutil.TempDir("", "getter-git")
+ if err != nil {
+ return err
+ }
+ if err := os.RemoveAll(td); err != nil {
+ return err
+ }
+
+ // Get the filename, and strip the filename from the URL so we can
+ // just get the repository directly.
+ filename := filepath.Base(u.Path)
+ u.Path = filepath.Dir(u.Path)
+
+ // Get the full repository
+ if err := g.Get(td, u); err != nil {
+ return err
+ }
+
+ // Copy the single file
+ u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename)))
+ if err != nil {
+ return err
+ }
+
+ fg := &FileGetter{Copy: true}
+ return fg.GetFile(dst, u)
+}
+
+func (g *GitGetter) checkout(dst string, ref string) error {
+ cmd := exec.Command("git", "checkout", ref)
+ cmd.Dir = dst
+ return getRunCommand(cmd)
+}
+
+func (g *GitGetter) clone(dst, sshKeyFile string, u *url.URL) error {
+ cmd := exec.Command("git", "clone", u.String(), dst)
+ setupGitEnv(cmd, sshKeyFile)
+ return getRunCommand(cmd)
+}
+
+func (g *GitGetter) update(dst, sshKeyFile, ref string) error {
+ // Determine if we're a branch. If we're NOT a branch, then we just
+ // switch to master prior to checking out
+ cmd := exec.Command("git", "show-ref", "-q", "--verify", "refs/heads/"+ref)
+ cmd.Dir = dst
+
+ if getRunCommand(cmd) != nil {
+ // Not a branch, switch to master. This will also catch non-existent
+ // branches, in which case we want to switch to master and then
+ // checkout the proper branch later.
+ ref = "master"
+ }
+
+ // We have to be on a branch to pull
+ if err := g.checkout(dst, ref); err != nil {
+ return err
+ }
+
+ cmd = exec.Command("git", "pull", "--ff-only")
+ cmd.Dir = dst
+ setupGitEnv(cmd, sshKeyFile)
+ return getRunCommand(cmd)
+}
+
+// fetchSubmodules downloads any configured submodules recursively.
+func (g *GitGetter) fetchSubmodules(dst, sshKeyFile string) error {
+ cmd := exec.Command("git", "submodule", "update", "--init", "--recursive")
+ cmd.Dir = dst
+ setupGitEnv(cmd, sshKeyFile)
+ return getRunCommand(cmd)
+}
+
+// setupGitEnv sets up the environment for the given command. This is used to
+// pass configuration data to git and ssh and enables advanced cloning methods.
+func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) {
+ var sshOpts []string
+
+ if sshKeyFile != "" {
+ // We have an SSH key temp file configured, tell ssh about this.
+ sshOpts = append(sshOpts, "-i", sshKeyFile)
+ }
+
+ cmd.Env = append(os.Environ(),
+ // Set the ssh command to use for clones.
+ "GIT_SSH_COMMAND=ssh "+strings.Join(sshOpts, " "),
+ )
+}
+
+// checkGitVersion is used to check the version of git installed on the system
+// against a known minimum version. Returns an error if the installed version
+// is older than the given minimum.
+func checkGitVersion(min string) error {
+ want, err := version.NewVersion(min)
+ if err != nil {
+ return err
+ }
+
+ out, err := exec.Command("git", "version").Output()
+ if err != nil {
+ return err
+ }
+
+ fields := strings.Fields(string(out))
+ if len(fields) != 3 {
+ return fmt.Errorf("Unexpected 'git version' output: %q", string(out))
+ }
+
+ have, err := version.NewVersion(fields[2])
+ if err != nil {
+ return err
+ }
+
+ if have.LessThan(want) {
+ return fmt.Errorf("Required git version = %s, have %s", want, have)
+ }
+
+ return nil
+}
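
A hedged sketch of the `ref` and `sshkey` query parameters consumed above; every path and URL here is hypothetical. The key travels inside the URL, so it is base64-encoded and, to be cautious, query-escaped as well:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"log"
	"net/url"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// Read a private key (hypothetical path) and base64-encode it;
	// escape it too, since standard base64 output contains '+' and '/'.
	pem, err := ioutil.ReadFile("/home/me/.ssh/id_rsa")
	if err != nil {
		log.Fatal(err)
	}
	key := url.QueryEscape(base64.StdEncoding.EncodeToString(pem))

	// ref selects the tag/branch to check out; sshkey requires
	// git >= 2.3 (enforced by checkGitVersion above).
	src := fmt.Sprintf(
		"git::ssh://git@example.com/repo.git?ref=v1.2.0&sshkey=%s", key)

	if err := getter.Get("./repo", src); err != nil {
		log.Fatal(err)
	}
}
```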
diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/get_hg.go
new file mode 100644
index 00000000..820bdd48
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_hg.go
@@ -0,0 +1,131 @@
+package getter
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+
+ urlhelper "github.com/hashicorp/go-getter/helper/url"
+)
+
+// HgGetter is a Getter implementation that will download a module from
+// a Mercurial repository.
+type HgGetter struct{}
+
+func (g *HgGetter) ClientMode(_ *url.URL) (ClientMode, error) {
+ return ClientModeDir, nil
+}
+
+func (g *HgGetter) Get(dst string, u *url.URL) error {
+ if _, err := exec.LookPath("hg"); err != nil {
+ return fmt.Errorf("hg must be available and on the PATH")
+ }
+
+ newURL, err := urlhelper.Parse(u.String())
+ if err != nil {
+ return err
+ }
+ if fixWindowsDrivePath(newURL) {
+ // See valid file path form on http://www.selenic.com/hg/help/urls
+ newURL.Path = fmt.Sprintf("/%s", newURL.Path)
+ }
+
+ // Extract some query parameters we use
+ var rev string
+ q := newURL.Query()
+ if len(q) > 0 {
+ rev = q.Get("rev")
+ q.Del("rev")
+
+ newURL.RawQuery = q.Encode()
+ }
+
+ _, err = os.Stat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err != nil {
+ if err := g.clone(dst, newURL); err != nil {
+ return err
+ }
+ }
+
+ if err := g.pull(dst, newURL); err != nil {
+ return err
+ }
+
+ return g.update(dst, newURL, rev)
+}
+
+// GetFile for Hg doesn't support updating at this time. It will download
+// the file every time.
+func (g *HgGetter) GetFile(dst string, u *url.URL) error {
+ td, err := ioutil.TempDir("", "getter-hg")
+ if err != nil {
+ return err
+ }
+ if err := os.RemoveAll(td); err != nil {
+ return err
+ }
+
+ // Get the filename, and strip the filename from the URL so we can
+ // just get the repository directly.
+ filename := filepath.Base(u.Path)
+ u.Path = filepath.ToSlash(filepath.Dir(u.Path))
+
+ // If we're on Windows, we need to set the host to "localhost" for hg
+ if runtime.GOOS == "windows" {
+ u.Host = "localhost"
+ }
+
+ // Get the full repository
+ if err := g.Get(td, u); err != nil {
+ return err
+ }
+
+ // Copy the single file
+ u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename)))
+ if err != nil {
+ return err
+ }
+
+ fg := &FileGetter{Copy: true}
+ return fg.GetFile(dst, u)
+}
+
+func (g *HgGetter) clone(dst string, u *url.URL) error {
+ cmd := exec.Command("hg", "clone", "-U", u.String(), dst)
+ return getRunCommand(cmd)
+}
+
+func (g *HgGetter) pull(dst string, u *url.URL) error {
+ cmd := exec.Command("hg", "pull")
+ cmd.Dir = dst
+ return getRunCommand(cmd)
+}
+
+func (g *HgGetter) update(dst string, u *url.URL, rev string) error {
+ args := []string{"update"}
+ if rev != "" {
+ args = append(args, rev)
+ }
+
+ cmd := exec.Command("hg", args...)
+ cmd.Dir = dst
+ return getRunCommand(cmd)
+}
+
+func fixWindowsDrivePath(u *url.URL) bool {
+ // hg assumes a file:/// prefix for Windows drive letter file paths.
+ // (e.g. file:///c:/foo/bar)
+ // If the URL Path does not begin with a '/' character, the resulting URL
+ // path will have a file:// prefix. (e.g. file://c:/foo/bar)
+ // See http://www.selenic.com/hg/help/urls and the examples listed in
+ // http://selenic.com/repo/hg-stable/file/1265a3a71d75/mercurial/util.py#l1936
+ return runtime.GOOS == "windows" && u.Scheme == "file" &&
+ len(u.Path) > 1 && u.Path[0] != '/' && u.Path[1] == ':'
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go
new file mode 100644
index 00000000..661d8989
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_http.go
@@ -0,0 +1,227 @@
+package getter
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// HttpGetter is a Getter implementation that will download from an HTTP
+// endpoint.
+//
+// For file downloads, HTTP is used directly.
+//
+// The protocol for downloading a directory from an HTTP endpoint is as follows:
+//
+// An HTTP GET request is made to the URL with the additional GET parameter
+// "terraform-get=1". This lets you handle that scenario specially if you
+// wish. The response must be a 2xx.
+//
+// First, the response is checked for an "X-Terraform-Get" header, which
+// should contain a source URL to download.
+//
+// If the header is not present, then a meta tag is searched for named
+// "terraform-get" and the content should be a source URL.
+//
+// The source URL, whether from the header or meta tag, must be a fully
+// formed URL. The shorthand syntax of "github.com/foo/bar" or relative
+// paths are not allowed.
+type HttpGetter struct {
+ // Netrc, if true, will lookup and use auth information found
+ // in the user's netrc file if available.
+ Netrc bool
+}
+
+func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) {
+ if strings.HasSuffix(u.Path, "/") {
+ return ClientModeDir, nil
+ }
+ return ClientModeFile, nil
+}
+
+func (g *HttpGetter) Get(dst string, u *url.URL) error {
+ // Copy the URL so we can modify it
+ var newU url.URL = *u
+ u = &newU
+
+ if g.Netrc {
+ // Add auth from netrc if we can
+ if err := addAuthFromNetrc(u); err != nil {
+ return err
+ }
+ }
+
+ // Add terraform-get to the parameter.
+ q := u.Query()
+ q.Add("terraform-get", "1")
+ u.RawQuery = q.Encode()
+
+ // Get the URL
+ resp, err := http.Get(u.String())
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+ return fmt.Errorf("bad response code: %d", resp.StatusCode)
+ }
+
+ // Extract the source URL
+ var source string
+ if v := resp.Header.Get("X-Terraform-Get"); v != "" {
+ source = v
+ } else {
+ source, err = g.parseMeta(resp.Body)
+ if err != nil {
+ return err
+ }
+ }
+ if source == "" {
+ return fmt.Errorf("no source URL was returned")
+ }
+
+ // If there is a subdir component, then we download the root separately
+ // into a temporary directory, then copy over the proper subdir.
+ source, subDir := SourceDirSubdir(source)
+ if subDir == "" {
+ return Get(dst, source)
+ }
+
+	// We have a subdir, time to jump through some hoops
+ return g.getSubdir(dst, source, subDir)
+}
+
+func (g *HttpGetter) GetFile(dst string, u *url.URL) error {
+
+ if g.Netrc {
+ // Add auth from netrc if we can
+ if err := addAuthFromNetrc(u); err != nil {
+ return err
+ }
+ }
+
+ resp, err := http.Get(u.String())
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ return fmt.Errorf("bad response code: %d", resp.StatusCode)
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ f, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ _, err = io.Copy(f, resp.Body)
+ return err
+}
+
+// getSubdir downloads the source into the destination, but with
+// the proper subdir.
+func (g *HttpGetter) getSubdir(dst, source, subDir string) error {
+ // Create a temporary directory to store the full source
+ td, err := ioutil.TempDir("", "tf")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(td)
+
+ // Download that into the given directory
+ if err := Get(td, source); err != nil {
+ return err
+ }
+
+ // Make sure the subdir path actually exists
+ sourcePath := filepath.Join(td, subDir)
+ if _, err := os.Stat(sourcePath); err != nil {
+ return fmt.Errorf(
+ "Error downloading %s: %s", source, err)
+ }
+
+ // Copy the subdirectory into our actual destination.
+ if err := os.RemoveAll(dst); err != nil {
+ return err
+ }
+
+ // Make the final destination
+ if err := os.MkdirAll(dst, 0755); err != nil {
+ return err
+ }
+
+ return copyDir(dst, sourcePath, false)
+}
+
+// parseMeta looks for the first meta tag in the given reader that
+// will give us the source URL.
+func (g *HttpGetter) parseMeta(r io.Reader) (string, error) {
+ d := xml.NewDecoder(r)
+ d.CharsetReader = charsetReader
+ d.Strict = false
+ var err error
+ var t xml.Token
+ for {
+ t, err = d.Token()
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ }
+ return "", err
+ }
+ if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") {
+ return "", nil
+ }
+ if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") {
+ return "", nil
+ }
+ e, ok := t.(xml.StartElement)
+ if !ok || !strings.EqualFold(e.Name.Local, "meta") {
+ continue
+ }
+ if attrValue(e.Attr, "name") != "terraform-get" {
+ continue
+ }
+ if f := attrValue(e.Attr, "content"); f != "" {
+ return f, nil
+ }
+ }
+}
+
+// attrValue returns the attribute value for the case-insensitive key
+// `name', or the empty string if nothing is found.
+func attrValue(attrs []xml.Attr, name string) string {
+ for _, a := range attrs {
+ if strings.EqualFold(a.Name.Local, name) {
+ return a.Value
+ }
+ }
+ return ""
+}
+
+// charsetReader returns a reader for the given charset. Currently
+// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful
+// error so the caller can tell why the document couldn't be parsed if
+// the encoding is not supported. Note that, in order to reduce potential
+// errors, ASCII is treated as UTF-8 (i.e. characters greater than 0x7f
+// are not rejected).
+func charsetReader(charset string, input io.Reader) (io.Reader, error) {
+ switch strings.ToLower(charset) {
+ case "ascii":
+ return input, nil
+ default:
+ return nil, fmt.Errorf("can't decode XML document using charset %q", charset)
+ }
+}
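
To make the directory protocol concrete, here is a toy origin server (hypothetical module URL) that answers the `terraform-get=1` probe in both forms; as the code above shows, the header is checked before the meta tag:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/module", func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("terraform-get") != "1" {
			http.NotFound(w, r)
			return
		}

		// Header form; go-getter checks this first.
		w.Header().Set("X-Terraform-Get", "git::https://example.com/repo.git")

		// Meta-tag form; only consulted when the header is absent.
		fmt.Fprint(w, `<meta name="terraform-get" content="git::https://example.com/repo.git">`)
	})

	log.Fatal(http.ListenAndServe(":8080", nil))
}
```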
diff --git a/vendor/github.com/hashicorp/go-getter/get_mock.go b/vendor/github.com/hashicorp/go-getter/get_mock.go
new file mode 100644
index 00000000..882e694d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_mock.go
@@ -0,0 +1,52 @@
+package getter
+
+import (
+ "net/url"
+)
+
+// MockGetter is an implementation of Getter that can be used for tests.
+type MockGetter struct {
+ // Proxy, if set, will be called after recording the calls below.
+ // If it isn't set, then the *Err values will be returned.
+ Proxy Getter
+
+ GetCalled bool
+ GetDst string
+ GetURL *url.URL
+ GetErr error
+
+ GetFileCalled bool
+ GetFileDst string
+ GetFileURL *url.URL
+ GetFileErr error
+}
+
+func (g *MockGetter) Get(dst string, u *url.URL) error {
+ g.GetCalled = true
+ g.GetDst = dst
+ g.GetURL = u
+
+ if g.Proxy != nil {
+ return g.Proxy.Get(dst, u)
+ }
+
+ return g.GetErr
+}
+
+func (g *MockGetter) GetFile(dst string, u *url.URL) error {
+ g.GetFileCalled = true
+ g.GetFileDst = dst
+ g.GetFileURL = u
+
+ if g.Proxy != nil {
+ return g.Proxy.GetFile(dst, u)
+ }
+ return g.GetFileErr
+}
+
+func (g *MockGetter) ClientMode(u *url.URL) (ClientMode, error) {
+ if l := len(u.Path); l > 0 && u.Path[l-1:] == "/" {
+ return ClientModeDir, nil
+ }
+ return ClientModeFile, nil
+}
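
A sketch of `MockGetter` in a test: it records the call and returns the canned `GetErr` (nil here) since no `Proxy` is set. The URL and destination are hypothetical:

```go
package getter_test

import (
	"net/url"
	"testing"

	getter "github.com/hashicorp/go-getter"
)

func TestRecordsGet(t *testing.T) {
	m := new(getter.MockGetter) // no Proxy, so the *Err fields (nil) are returned

	u, err := url.Parse("http://example.com/thing")
	if err != nil {
		t.Fatal(err)
	}
	if err := m.Get("/tmp/dst", u); err != nil {
		t.Fatal(err)
	}

	if !m.GetCalled || m.GetDst != "/tmp/dst" {
		t.Fatalf("unexpected call recording: %+v", m)
	}
}
```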
diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go
new file mode 100644
index 00000000..d3bffeb1
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_s3.go
@@ -0,0 +1,243 @@
+package getter
+
+import (
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+// S3Getter is a Getter implementation that will download a module from
+// an S3 bucket.
+type S3Getter struct{}
+
+func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) {
+ // Parse URL
+ region, bucket, path, _, creds, err := g.parseUrl(u)
+ if err != nil {
+ return 0, err
+ }
+
+ // Create client config
+ config := g.getAWSConfig(region, creds)
+ sess := session.New(config)
+ client := s3.New(sess)
+
+ // List the object(s) at the given prefix
+ req := &s3.ListObjectsInput{
+ Bucket: aws.String(bucket),
+ Prefix: aws.String(path),
+ }
+ resp, err := client.ListObjects(req)
+ if err != nil {
+ return 0, err
+ }
+
+ for _, o := range resp.Contents {
+ // Use file mode on exact match.
+ if *o.Key == path {
+ return ClientModeFile, nil
+ }
+
+ // Use dir mode if child keys are found.
+ if strings.HasPrefix(*o.Key, path+"/") {
+ return ClientModeDir, nil
+ }
+ }
+
+ // There was no match, so just return file mode. The download is going
+ // to fail but we will let S3 return the proper error later.
+ return ClientModeFile, nil
+}
+
+func (g *S3Getter) Get(dst string, u *url.URL) error {
+ // Parse URL
+ region, bucket, path, _, creds, err := g.parseUrl(u)
+ if err != nil {
+ return err
+ }
+
+ // Remove destination if it already exists
+ _, err = os.Stat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ if err == nil {
+ // Remove the destination
+ if err := os.RemoveAll(dst); err != nil {
+ return err
+ }
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ config := g.getAWSConfig(region, creds)
+ sess := session.New(config)
+ client := s3.New(sess)
+
+ // List files in path, keep listing until no more objects are found
+ lastMarker := ""
+ hasMore := true
+ for hasMore {
+ req := &s3.ListObjectsInput{
+ Bucket: aws.String(bucket),
+ Prefix: aws.String(path),
+ }
+ if lastMarker != "" {
+ req.Marker = aws.String(lastMarker)
+ }
+
+ resp, err := client.ListObjects(req)
+ if err != nil {
+ return err
+ }
+
+ hasMore = aws.BoolValue(resp.IsTruncated)
+
+ // Get each object storing each file relative to the destination path
+ for _, object := range resp.Contents {
+ lastMarker = aws.StringValue(object.Key)
+ objPath := aws.StringValue(object.Key)
+
+			// If the key ends with a slash, assume it is a directory and ignore it
+ if strings.HasSuffix(objPath, "/") {
+ continue
+ }
+
+ // Get the object destination path
+ objDst, err := filepath.Rel(path, objPath)
+ if err != nil {
+ return err
+ }
+ objDst = filepath.Join(dst, objDst)
+
+ if err := g.getObject(client, objDst, bucket, objPath, ""); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (g *S3Getter) GetFile(dst string, u *url.URL) error {
+ region, bucket, path, version, creds, err := g.parseUrl(u)
+ if err != nil {
+ return err
+ }
+
+ config := g.getAWSConfig(region, creds)
+ sess := session.New(config)
+ client := s3.New(sess)
+ return g.getObject(client, dst, bucket, path, version)
+}
+
+func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) error {
+ req := &s3.GetObjectInput{
+ Bucket: aws.String(bucket),
+ Key: aws.String(key),
+ }
+ if version != "" {
+ req.VersionId = aws.String(version)
+ }
+
+ resp, err := client.GetObject(req)
+ if err != nil {
+ return err
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ f, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ _, err = io.Copy(f, resp.Body)
+ return err
+}
+
+func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *aws.Config {
+ conf := &aws.Config{}
+ if creds == nil {
+ // Grab the metadata URL
+ metadataURL := os.Getenv("AWS_METADATA_URL")
+ if metadataURL == "" {
+ metadataURL = "http://169.254.169.254:80/latest"
+ }
+
+ creds = credentials.NewChainCredentials(
+ []credentials.Provider{
+ &credentials.EnvProvider{},
+ &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+ &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.New(session.New(&aws.Config{
+ Endpoint: aws.String(metadataURL),
+ })),
+ },
+ })
+ }
+
+ conf.Credentials = creds
+ if region != "" {
+ conf.Region = aws.String(region)
+ }
+
+ return conf
+}
+
+func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) {
+ // Expected host style: s3.amazonaws.com. They always have 3 parts,
+ // although the first may differ if we're accessing a specific region.
+ hostParts := strings.Split(u.Host, ".")
+ if len(hostParts) != 3 {
+ err = fmt.Errorf("URL is not a valid S3 URL")
+ return
+ }
+
+ // Parse the region out of the first part of the host
+ region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3")
+ if region == "" {
+ region = "us-east-1"
+ }
+
+ pathParts := strings.SplitN(u.Path, "/", 3)
+ if len(pathParts) != 3 {
+ err = fmt.Errorf("URL is not a valid S3 URL")
+ return
+ }
+
+ bucket = pathParts[1]
+ path = pathParts[2]
+ version = u.Query().Get("version")
+
+ _, hasAwsId := u.Query()["aws_access_key_id"]
+ _, hasAwsSecret := u.Query()["aws_access_key_secret"]
+ _, hasAwsToken := u.Query()["aws_access_token"]
+ if hasAwsId || hasAwsSecret || hasAwsToken {
+ creds = credentials.NewStaticCredentials(
+ u.Query().Get("aws_access_key_id"),
+ u.Query().Get("aws_access_key_secret"),
+ u.Query().Get("aws_access_token"),
+ )
+ }
+
+ return
+}
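
A hedged sketch of the URL shape `parseUrl` accepts; the bucket, key, version, and credentials are all hypothetical, and the `aws_*` parameters can be omitted to fall back to the credential chain above:

```go
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// parseUrl reads the bucket and key from the path, plus the version
	// and aws_* query parameters; omit the credentials to fall back to
	// env vars, shared credentials, or the EC2 role.
	src := "s3::https://s3-eu-west-1.amazonaws.com/my-bucket/dir/file.txt" +
		"?version=abc123&aws_access_key_id=KEYID&aws_access_key_secret=SECRET"

	if err := getter.GetFile("./file.txt", src); err != nil {
		log.Fatal(err)
	}
}
```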
diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url.go b/vendor/github.com/hashicorp/go-getter/helper/url/url.go
new file mode 100644
index 00000000..02497c25
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/helper/url/url.go
@@ -0,0 +1,14 @@
+package url
+
+import (
+ "net/url"
+)
+
+// Parse parses rawURL into a URL structure.
+// The rawURL may be relative or absolute.
+//
+// Parse is a wrapper for the Go stdlib net/url Parse function, but returns
+// Windows "safe" URLs on Windows platforms.
+func Parse(rawURL string) (*url.URL, error) {
+ return parse(rawURL)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go
new file mode 100644
index 00000000..ed1352a9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package url
+
+import (
+ "net/url"
+)
+
+func parse(rawURL string) (*url.URL, error) {
+ return url.Parse(rawURL)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go
new file mode 100644
index 00000000..4655226f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go
@@ -0,0 +1,40 @@
+package url
+
+import (
+ "fmt"
+ "net/url"
+ "path/filepath"
+ "strings"
+)
+
+func parse(rawURL string) (*url.URL, error) {
+ // Make sure we're using "/" since URLs are "/"-based.
+ rawURL = filepath.ToSlash(rawURL)
+
+ u, err := url.Parse(rawURL)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(rawURL) > 1 && rawURL[1] == ':' {
+ // Assume we're dealing with a drive letter file path where the drive
+ // letter has been parsed into the URL Scheme, and the rest of the path
+ // has been parsed into the URL Path without the leading ':' character.
+ u.Path = fmt.Sprintf("%s:%s", string(rawURL[0]), u.Path)
+ u.Scheme = ""
+ }
+
+ if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") {
+ // Assume we're dealing with a drive letter file path where the drive
+ // letter has been parsed into the URL Host.
+ u.Path = fmt.Sprintf("%s%s", u.Host, u.Path)
+ u.Host = ""
+ }
+
+ // Remove leading slash for absolute file paths.
+ if len(u.Path) > 2 && u.Path[0] == '/' && u.Path[2] == ':' {
+ u.Path = u.Path[1:]
+ }
+
+ return u, err
+}
diff --git a/vendor/github.com/hashicorp/go-getter/netrc.go b/vendor/github.com/hashicorp/go-getter/netrc.go
new file mode 100644
index 00000000..c7f6a3fb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/netrc.go
@@ -0,0 +1,67 @@
+package getter
+
+import (
+ "fmt"
+ "net/url"
+ "os"
+ "runtime"
+
+ "github.com/bgentry/go-netrc/netrc"
+ "github.com/mitchellh/go-homedir"
+)
+
+// addAuthFromNetrc adds auth information to the URL from the user's
+// netrc file if it can be found. This will only add the auth info
+// if the URL doesn't already have auth information specified, i.e.
+// the username is blank.
+func addAuthFromNetrc(u *url.URL) error {
+ // If the URL already has auth information, do nothing
+ if u.User != nil && u.User.Username() != "" {
+ return nil
+ }
+
+ // Get the netrc file path
+ path := os.Getenv("NETRC")
+ if path == "" {
+ filename := ".netrc"
+ if runtime.GOOS == "windows" {
+ filename = "_netrc"
+ }
+
+ var err error
+ path, err = homedir.Expand("~/" + filename)
+ if err != nil {
+ return err
+ }
+ }
+
+	// If the path does not point at a regular file, do nothing
+ if fi, err := os.Stat(path); err != nil {
+ // File doesn't exist, do nothing
+ if os.IsNotExist(err) {
+ return nil
+ }
+
+ // Some other error!
+ return err
+ } else if fi.IsDir() {
+ // File is directory, ignore
+ return nil
+ }
+
+ // Load up the netrc file
+ net, err := netrc.ParseFile(path)
+ if err != nil {
+ return fmt.Errorf("Error parsing netrc file at %q: %s", path, err)
+ }
+
+ machine := net.FindMachine(u.Host)
+ if machine == nil {
+ // Machine not found, no problem
+ return nil
+ }
+
+ // Set the user info
+ u.User = url.UserPassword(machine.Login, machine.Password)
+ return nil
+}
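
A sketch of driving this lookup through the `NETRC` override (hypothetical host, credentials, and file path); the contents shown in the comment follow standard netrc syntax:

```go
package main

import (
	"log"
	"os"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// NETRC overrides the default ~/.netrc (_netrc on Windows) lookup.
	// The hypothetical file at this path would contain:
	//
	//   machine example.com
	//   login myuser
	//   password mypassword
	os.Setenv("NETRC", "/tmp/test-netrc")

	// HttpGetter is registered with Netrc enabled (see init in get.go),
	// so the credentials above are attached to this request.
	if err := getter.GetFile("./file", "https://example.com/file"); err != nil {
		log.Fatal(err)
	}
}
```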
diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go
new file mode 100644
index 00000000..4d5ee3cc
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/source.go
@@ -0,0 +1,36 @@
+package getter
+
+import (
+ "strings"
+)
+
+// SourceDirSubdir takes a source and returns a tuple of the source
+// without the subdir and the subdir itself.
+func SourceDirSubdir(src string) (string, string) {
+	// Calculate an offset to avoid accidentally marking the scheme
+	// as the dir.
+ var offset int
+ if idx := strings.Index(src, "://"); idx > -1 {
+ offset = idx + 3
+ }
+
+ // First see if we even have an explicit subdir
+ idx := strings.Index(src[offset:], "//")
+ if idx == -1 {
+ return src, ""
+ }
+
+ idx += offset
+ subdir := src[idx+2:]
+ src = src[:idx]
+
+ // Next, check if we have query parameters and push them onto the
+ // URL.
+ if idx = strings.Index(subdir, "?"); idx > -1 {
+ query := subdir[idx:]
+ subdir = subdir[:idx]
+ src += query
+ }
+
+ return src, subdir
+}
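
Concretely (hypothetical URL), the `//` separator and the query handling behave like this:

```go
package main

import (
	"fmt"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// The "//" separates the repository from the subdirectory, and the
	// query string is pushed back onto the source URL.
	src, subdir := getter.SourceDirSubdir("https://example.com/repo//sub/dir?ref=v1")
	fmt.Println(src)    // https://example.com/repo?ref=v1
	fmt.Println(subdir) // sub/dir
}
```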
diff --git a/vendor/github.com/hashicorp/go-getter/storage.go b/vendor/github.com/hashicorp/go-getter/storage.go
new file mode 100644
index 00000000..2bc6b9ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/storage.go
@@ -0,0 +1,13 @@
+package getter
+
+// Storage is an interface that knows how to lookup downloaded directories
+// as well as download and update directories from their sources into the
+// proper location.
+type Storage interface {
+ // Dir returns the directory on local disk where the directory source
+ // can be loaded from.
+ Dir(string) (string, bool, error)
+
+ // Get will download and optionally update the given directory.
+ Get(string, string, bool) error
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE
new file mode 100644
index 00000000..82b4de97
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/LICENSE
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md
new file mode 100644
index 00000000..ead5830f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/README.md
@@ -0,0 +1,97 @@
+# go-multierror
+
+[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis]
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[travis]: https://travis-ci.org/hashicorp/go-multierror
+[godocs]: https://godoc.org/github.com/hashicorp/go-multierror
+
+`go-multierror` is a package for Go that provides a mechanism for
+representing a list of `error` values as a single `error`.
+
+This allows a function in Go to return an `error` that might actually
+be a list of errors. If the caller knows this, they can unwrap the
+list and access the errors. If the caller doesn't know, the error
+formats to a nice human-readable format.
+
+`go-multierror` implements the
+[errwrap](https://github.com/hashicorp/errwrap) interface so that it can
+be used with that library, as well.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/go-multierror`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/go-multierror
+
+## Usage
+
+go-multierror is easy to use and purposely built to be unobtrusive in
+existing Go applications/libraries that may not be aware of it.
+
+**Building a list of errors**
+
+The `Append` function is used to create a list of errors. This function
+behaves a lot like the Go built-in `append` function: whether the first
+argument is nil, a `multierror.Error`, or any other `error`, the function
+behaves as you would expect.
+
+```go
+var result error
+
+if err := step1(); err != nil {
+ result = multierror.Append(result, err)
+}
+if err := step2(); err != nil {
+ result = multierror.Append(result, err)
+}
+
+return result
+```
+
+**Customizing the formatting of the errors**
+
+By specifying a custom `ErrorFormat`, you can customize the format
+of the `Error() string` function:
+
+```go
+var result *multierror.Error
+
+// ... accumulate errors here, maybe using Append
+
+if result != nil {
+ result.ErrorFormat = func([]error) string {
+ return "errors!"
+ }
+}
+```
+
+**Accessing the list of errors**
+
+`multierror.Error` implements `error` so if the caller doesn't know about
+multierror, it will work just fine. But if you're aware a multierror might
+be returned, you can use type switches to access the list of errors:
+
+```go
+if err := something(); err != nil {
+ if merr, ok := err.(*multierror.Error); ok {
+ // Use merr.Errors
+ }
+}
+```
+
+**Returning a multierror only if there are errors**
+
+If you build a `multierror.Error`, you can use the `ErrorOrNil` function
+to return an `error` implementation only if there are errors to return:
+
+```go
+var result *multierror.Error
+
+// ... accumulate errors here
+
+// Return the `error` only if errors were added to the multierror, otherwise
+// return nil since there are no errors.
+return result.ErrorOrNil()
+```
diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go
new file mode 100644
index 00000000..775b6e75
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/append.go
@@ -0,0 +1,41 @@
+package multierror
+
+// Append is a helper function that will append more errors
+// onto an Error in order to create a larger multi-error.
+//
+// If err is not a multierror.Error, then it will be turned into
+// one. If any of the errs are multierror.Error, they will be flattened
+// one level into err.
+func Append(err error, errs ...error) *Error {
+ switch err := err.(type) {
+ case *Error:
+ // Typed nils can reach here, so initialize if we are nil
+ if err == nil {
+ err = new(Error)
+ }
+
+ // Go through each error and flatten
+ for _, e := range errs {
+ switch e := e.(type) {
+ case *Error:
+ if e != nil {
+ err.Errors = append(err.Errors, e.Errors...)
+ }
+ default:
+ if e != nil {
+ err.Errors = append(err.Errors, e)
+ }
+ }
+ }
+
+ return err
+ default:
+ newErrs := make([]error, 0, len(errs)+1)
+ if err != nil {
+ newErrs = append(newErrs, err)
+ }
+ newErrs = append(newErrs, errs...)
+
+ return Append(&Error{}, newErrs...)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go
new file mode 100644
index 00000000..aab8e9ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/flatten.go
@@ -0,0 +1,26 @@
+package multierror
+
+// Flatten flattens the given error, merging any *Errors together into
+// a single *Error.
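+//
+// For example (illustrative), an *Error whose Errors contain another
+// *Error{a, b} plus a plain error c flattens to *Error{a, b, c}.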
+func Flatten(err error) error {
+ // If it isn't an *Error, just return the error as-is
+ if _, ok := err.(*Error); !ok {
+ return err
+ }
+
+ // Otherwise, make the result and flatten away!
+ flatErr := new(Error)
+ flatten(err, flatErr)
+ return flatErr
+}
+
+func flatten(err error, flatErr *Error) {
+ switch err := err.(type) {
+ case *Error:
+ for _, e := range err.Errors {
+ flatten(e, flatErr)
+ }
+ default:
+ flatErr.Errors = append(flatErr.Errors, err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
new file mode 100644
index 00000000..6c7a3cc9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/format.go
@@ -0,0 +1,27 @@
+package multierror
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ErrorFormatFunc is a function callback that is called by Error to
+// turn the list of errors into a string.
+type ErrorFormatFunc func([]error) string
+
+// ListFormatFunc is a basic formatter that outputs the number of errors
+// that occurred along with a bullet point list of the errors.
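+//
+// For example, two errors with messages "a" and "b" format as:
+//
+//   2 errors occurred:
+//
+//   * a
+//   * b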
+func ListFormatFunc(es []error) string {
+ if len(es) == 1 {
+ return fmt.Sprintf("1 error occurred:\n\n* %s", es[0])
+ }
+
+ points := make([]string, len(es))
+ for i, err := range es {
+ points[i] = fmt.Sprintf("* %s", err)
+ }
+
+ return fmt.Sprintf(
+ "%d errors occurred:\n\n%s",
+ len(es), strings.Join(points, "\n"))
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
new file mode 100644
index 00000000..2ea08273
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/multierror.go
@@ -0,0 +1,51 @@
+package multierror
+
+import (
+ "fmt"
+)
+
+// Error is an error type to track multiple errors. This is used to
+// accumulate errors in cases and return them as a single "error".
+type Error struct {
+ Errors []error
+ ErrorFormat ErrorFormatFunc
+}
+
+func (e *Error) Error() string {
+ fn := e.ErrorFormat
+ if fn == nil {
+ fn = ListFormatFunc
+ }
+
+ return fn(e.Errors)
+}
+
+// ErrorOrNil returns an error interface if this Error represents
+// a list of errors, or returns nil if the list of errors is empty. This
+// function is useful at the end of accumulation to make sure that the value
+// returned represents the existence of errors.
+func (e *Error) ErrorOrNil() error {
+ if e == nil {
+ return nil
+ }
+ if len(e.Errors) == 0 {
+ return nil
+ }
+
+ return e
+}
+
+func (e *Error) GoString() string {
+ return fmt.Sprintf("*%#v", *e)
+}
+
+// WrappedErrors returns the list of errors that this Error is wrapping.
+// It is an implementation of the errwrap.Wrapper interface so that
+// multierror.Error can be used with that library.
+//
+// This method is not safe to be called concurrently and is no different
+// than accessing the Errors field directly. It is implemented only to
+// satisfy the errwrap.Wrapper interface.
+func (e *Error) WrappedErrors() []error {
+ return e.Errors
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go
new file mode 100644
index 00000000..5c477abe
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/prefix.go
@@ -0,0 +1,37 @@
+package multierror
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/errwrap"
+)
+
+// Prefix is a helper function that will prefix some text
+// to the given error. If the error is a multierror.Error, then
+// it will be prefixed to each wrapped error.
+//
+// This is useful to use when appending multiple multierrors
+// together in order to give better scoping.
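+//
+// For example (illustrative), Prefix(err, "step 1:") turns an error with
+// message "oops" into one that formats as "step 1: oops".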
+func Prefix(err error, prefix string) error {
+ if err == nil {
+ return nil
+ }
+
+ format := fmt.Sprintf("%s {{err}}", prefix)
+ switch err := err.(type) {
+ case *Error:
+ // Typed nils can reach here, so initialize if we are nil
+ if err == nil {
+ err = new(Error)
+ }
+
+ // Wrap each of the errors
+ for i, e := range err.Errors {
+ err.Errors[i] = errwrap.Wrapf(format, e)
+ }
+
+ return err
+ default:
+ return errwrap.Wrapf(format, err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE
new file mode 100644
index 00000000..82b4de97
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/LICENSE
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md
new file mode 100644
index 00000000..28c63556
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/README.md
@@ -0,0 +1,160 @@
+# Go Plugin System over RPC
+
+`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system
+that has been in use by HashiCorp tooling for over 4 years. While initially
+created for [Packer](https://www.packer.io), it is additionally in use by
+[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and
+[Vault](https://www.vaultproject.io).
+
+While the plugin system is over RPC, it is currently only designed to work
+over a local [reliable] network. Plugins over a real network are not supported
+and will lead to unexpected behavior.
+
+This plugin system has been used on millions of machines across many different
+projects and has proven to be battle-hardened and ready for production use.
+
+## Features
+
+The HashiCorp plugin system supports a number of features:
+
+**Plugins are Go interface implementations.** This makes writing and consuming
+plugins feel very natural. To a plugin author: you just implement an
+interface as if it were going to run in the same process. For a plugin user:
+you just use and call functions on an interface as if it were in the same
+process. This plugin system handles the communication in between.
+
+**Complex arguments and return values are supported.** This library
+provides APIs for handling complex arguments and return values such
+as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library
+(`MuxBroker`) for creating new connections between the client/server to
+serve additional interfaces or transfer raw data.
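+
+For example, a sketch of the `MuxBroker` pattern (fragment; `broker` is the
+`*MuxBroker` passed to your `Plugin` implementation and `impl` is a
+hypothetical RPC receiver):
+
+```go
+// One side reserves a stream ID and serves an interface on it.
+id := broker.NextId()
+go broker.AcceptAndServe(id, impl)
+
+// The ID is sent across the existing RPC connection, and the other
+// side dials it to get a dedicated net.Conn for that stream.
+conn, err := broker.Dial(id)
+```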
+
+**Bidirectional communication.** Because the plugin system supports
+complex arguments, the host process can send it interface implementations
+and the plugin can call back into the host process.
+
+**Built-in Logging.** Any plugins that use the `log` standard library
+will have log data automatically sent to the host process. The host
+process will mirror this output prefixed with the path to the plugin
+binary. This makes debugging with plugins simple.
+
+**Protocol Versioning.** A very basic "protocol version" is supported that
+can be incremented to invalidate any previous plugins. This is useful when
+interface signatures are changing, protocol level changes are necessary,
+etc. When a protocol version is incompatible, a human friendly error
+message is shown to the end user.
+
+**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue
+to use stdout/stderr as usual and the output will get mirrored back to
+the host process. The host process can control what `io.Writer` these
+streams are written to in order to prevent this from happening.
+
+**TTY Preservation.** Plugin subprocesses are connected to the identical
+stdin file descriptor as the host process, allowing software that requires
+a TTY to work. For example, a plugin can execute `ssh` and even though there
+are multiple subprocesses and RPC happening, it will look and act perfectly
+to the end user.
+
+**Host upgrade while a plugin is running.** Plugins can be "reattached"
+so that the host process can be upgraded while the plugin is still running.
+This requires the host/plugin to know this is possible and daemonize
+properly. `NewClient` takes a `ReattachConfig` to determine if and how to
+reattach.
+
+**Cryptographically Secure Plugins.** Plugins can be verified with an expected
+checksum and RPC communications can be configured to use TLS. The host process
+must be properly secured to protect this configuration.
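+
+For example, a sketch of wiring up `SecureConfig` (fragment; `handshake`,
+`pluginMap`, and `expectedSHA256` are hypothetical values, and
+`crypto/sha256` supplies the `hash.Hash`):
+
+```go
+client := plugin.NewClient(&plugin.ClientConfig{
+	HandshakeConfig: handshake,
+	Plugins:         pluginMap,
+	Cmd:             exec.Command("./my-plugin"),
+	SecureConfig: &plugin.SecureConfig{
+		Checksum: expectedSHA256, // []byte of the binary's expected digest
+		Hash:     sha256.New(),
+	},
+})
+```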
+
+## Architecture
+
+The HashiCorp plugin system works by launching subprocesses and communicating
+over RPC (using standard `net/rpc`). A single connection is made between
+any plugin and the host process, and we use a
+[connection multiplexing](https://github.com/hashicorp/yamux)
+library to multiplex any other connections on top.
+
+This architecture has a number of benefits:
+
+ * Plugins can't crash your host process: A panic in a plugin doesn't
+ panic the plugin user.
+
+  * Plugins are very easy to write: just write a Go application and `go build`.
+    Theoretically you could also use another language as long as it can
+    speak the Go `net/rpc` protocol, but this hasn't yet been tried.
+
+ * Plugins are very easy to install: just put the binary in a location where
+ the host will find it (depends on the host but this library also provides
+ helpers), and the plugin host handles the rest.
+
+ * Plugins can be relatively secure: The plugin only has access to the
+ interfaces and args given to it, not to the entire memory space of the
+ process. More security features are planned (see the coming soon section
+ below).
+
+## Usage
+
+To use the plugin system, you must take the following steps. These are
+high-level steps that must be done. Examples are available in the
+`examples/` directory.
+
+ 1. Choose the interface(s) you want to expose for plugins.
+
+  2. For each interface, write an implementation of that interface
+     that communicates over an `*rpc.Client` (from the standard `net/rpc`
+     package) for every function call. Likewise, implement the RPC server
+     struct this communicates to, which in turn delegates to a real,
+     concrete implementation.
+
+ 3. Create a `Plugin` implementation that knows how to create the RPC
+ client/server for a given plugin type.
+
+ 4. Plugin authors call `plugin.Serve` to serve a plugin from the
+ `main` function.
+
+ 5. Plugin users use `plugin.Client` to launch a subprocess and request
+ an interface implementation over RPC.
+
+That's it! In practice, step 2 is the most tedious and time-consuming step.
+Even so, it isn't very difficult, and you can see examples in the `examples/`
+directory as well as throughout our various open source projects.
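+
+For example, a minimal sketch of steps 1-3 for a hypothetical `Greeter`
+interface (names are illustrative and error handling is trimmed):
+
+```go
+// Step 1: the interface exposed to plugins.
+type Greeter interface {
+	Greet() string
+}
+
+// Step 2: the RPC client wrapper...
+type GreeterRPC struct{ client *rpc.Client }
+
+func (g *GreeterRPC) Greet() string {
+	var resp string
+	g.client.Call("Plugin.Greet", new(interface{}), &resp)
+	return resp
+}
+
+// ...and the RPC server wrapper around a concrete implementation.
+type GreeterRPCServer struct{ Impl Greeter }
+
+func (s *GreeterRPCServer) Greet(args interface{}, resp *string) error {
+	*resp = s.Impl.Greet()
+	return nil
+}
+
+// Step 3: the Plugin implementation that ties the two together.
+type GreeterPlugin struct{ Impl Greeter }
+
+func (p *GreeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
+	return &GreeterRPCServer{Impl: p.Impl}, nil
+}
+
+func (p *GreeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
+	return &GreeterRPC{client: c}, nil
+}
+```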
+
+For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin).
+
+## Roadmap
+
+Our plugin system is constantly evolving. As we use the plugin system for
+new projects or for new features in existing projects, we constantly find
+improvements we can make.
+
+At this point in time, the roadmap for the plugin system is:
+
+**Semantic Versioning.** Plugins will be able to implement a semantic version.
+This plugin system will give host processes a system for constraining
+versions. This is in addition to the protocol versioning already present
+which is more for larger underlying changes.
+
+**Plugin fetching.** We will integrate with [go-getter](https://github.com/hashicorp/go-getter)
+to support automatic download + install of plugins. Paired with cryptographically
+secure plugins (above), we can make this a safe operation for an amazing
+user experience.
+
+## What About Shared Libraries?
+
+When we started using plugins (late 2012, early 2013), plugins over RPC
+were the only option since Go didn't support dynamic library loading. Today,
+Go still doesn't support dynamic library loading, though the Go team intends
+to add it. Since 2012, our plugin system has stabilized through heavy use,
+and it has many benefits we've come to value greatly.
+
+For example, we intend to use this plugin system in
+[Vault](https://www.vaultproject.io), and dynamic library loading will
+simply never be acceptable in Vault for security reasons. That is an extreme
+example, but we believe our library system has more upsides than downsides
+over dynamic library loading and since we've had it built and tested for years,
+we'll likely continue to use it.
+
+Shared libraries have one major advantage over our system: much higher
+performance. In real-world scenarios across our various tools, we've never
+needed more performance than our plugin system provides, and it has seen
+very high throughput, so this isn't a concern for us at the moment.
+
diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go
new file mode 100644
index 00000000..b69d41b2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/client.go
@@ -0,0 +1,666 @@
+package plugin
+
+import (
+ "bufio"
+ "crypto/subtle"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unicode"
+)
+
+// If this is 1, then we've called CleanupClients. This can be used
+// by plugin RPC implementations to change error behavior since you
+// can expect network connection errors at this point. This should be
+// read by using sync/atomic.
+var Killed uint32 = 0
+
+// This is a slice of the "managed" clients which are cleaned up when
+// calling CleanupClients.
+var managedClients = make([]*Client, 0, 5)
+var managedClientsLock sync.Mutex
+
+// Error types
+var (
+ // ErrProcessNotFound is returned when a client is instantiated to
+ // reattach to an existing process and it isn't found.
+ ErrProcessNotFound = errors.New("Reattachment process not found")
+
+	// ErrChecksumsDoNotMatch is returned when the binary's checksum doesn't match
+ // the one provided in the SecureConfig.
+ ErrChecksumsDoNotMatch = errors.New("checksums did not match")
+
+	// ErrSecureConfigNoChecksum is returned when an empty checksum is
+	// provided to the SecureConfig.
+ ErrSecureConfigNoChecksum = errors.New("no checksum provided")
+
+	// ErrSecureConfigNoHash is returned when a nil Hash object is provided
+	// to the SecureConfig.
+ ErrSecureConfigNoHash = errors.New("no hash implementation provided")
+
+ // ErrSecureConfigAndReattach is returned when both Reattach and
+ // SecureConfig are set.
+ ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set")
+)
+
+// Client handles the lifecycle of a plugin application. It launches
+// plugins, connects to them, dispenses interface implementations, and handles
+// killing the process.
+//
+// Plugin hosts should use one Client for each plugin executable. To
+// dispense a plugin type, use the `Client.Client` function, and then
+// call `Dispense`. This awkward API is mostly historical but is used to split
+// the client that deals with subprocess management and the client that
+// does RPC management.
+//
+// See NewClient and ClientConfig for using a Client.
+type Client struct {
+ config *ClientConfig
+ exited bool
+ doneLogging chan struct{}
+ l sync.Mutex
+ address net.Addr
+ process *os.Process
+ client *RPCClient
+}
+
+// ClientConfig is the configuration used to initialize a new
+// plugin client. After being used to initialize a plugin client,
+// that configuration must not be modified again.
+type ClientConfig struct {
+ // HandshakeConfig is the configuration that must match servers.
+ HandshakeConfig
+
+ // Plugins are the plugins that can be consumed.
+ Plugins map[string]Plugin
+
+ // One of the following must be set, but not both.
+ //
+ // Cmd is the unstarted subprocess for starting the plugin. If this is
+ // set, then the Client starts the plugin process on its own and connects
+ // to it.
+ //
+ // Reattach is configuration for reattaching to an existing plugin process
+ // that is already running. This isn't common.
+ Cmd *exec.Cmd
+ Reattach *ReattachConfig
+
+ // SecureConfig is configuration for verifying the integrity of the
+ // executable. It can not be used with Reattach.
+ SecureConfig *SecureConfig
+
+ // TLSConfig is used to enable TLS on the RPC client.
+ TLSConfig *tls.Config
+
+ // Managed represents if the client should be managed by the
+ // plugin package or not. If true, then by calling CleanupClients,
+ // it will automatically be cleaned up. Otherwise, the client
+ // user is fully responsible for making sure to Kill all plugin
+ // clients. By default the client is _not_ managed.
+ Managed bool
+
+ // The minimum and maximum port to use for communicating with
+ // the subprocess. If not set, this defaults to 10,000 and 25,000
+ // respectively.
+ MinPort, MaxPort uint
+
+ // StartTimeout is the timeout to wait for the plugin to say it
+ // has started successfully.
+ StartTimeout time.Duration
+
+ // If non-nil, then the stderr of the client will be written to here
+ // (as well as the log). This is the original os.Stderr of the subprocess.
+ // This isn't the output of synced stderr.
+ Stderr io.Writer
+
+ // SyncStdout, SyncStderr can be set to override the
+ // respective os.Std* values in the plugin. Care should be taken to
+ // avoid races here. If these are nil, then this will automatically be
+ // hooked up to os.Stdin, Stdout, and Stderr, respectively.
+ //
+ // If the default values (nil) are used, then this package will not
+ // sync any of these streams.
+ SyncStdout io.Writer
+ SyncStderr io.Writer
+}
+
+// ReattachConfig is used to configure a client to reattach to an
+// already-running plugin process. You can retrieve this information by
+// calling ReattachConfig on Client.
+type ReattachConfig struct {
+ Addr net.Addr
+ Pid int
+}
+
+// SecureConfig is used to configure a client to verify the integrity of an
+// executable before running it. It does this by checking that the file's
+// checksum matches the expected checksum. Hash specifies the hashing method
+// to use when checksumming the file. The configuration is verified by the
+// client by calling the SecureConfig.Check() function.
+//
+// The host process should ensure the checksum was provided by a trusted and
+// authoritative source. The binary should be installed in such a way that it
+// can not be modified by an unauthorized user between the time of this check
+// and the time of execution.
+type SecureConfig struct {
+ Checksum []byte
+ Hash hash.Hash
+}
+
+// Check takes the filepath to an executable and returns true if the checksum of
+// the file matches the checksum provided in the SecureConfig.
+func (s *SecureConfig) Check(filePath string) (bool, error) {
+ if len(s.Checksum) == 0 {
+ return false, ErrSecureConfigNoChecksum
+ }
+
+ if s.Hash == nil {
+ return false, ErrSecureConfigNoHash
+ }
+
+ file, err := os.Open(filePath)
+ if err != nil {
+ return false, err
+ }
+ defer file.Close()
+
+ _, err = io.Copy(s.Hash, file)
+ if err != nil {
+ return false, err
+ }
+
+ sum := s.Hash.Sum(nil)
+
+ return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil
+}
+
+// This makes sure all the managed subprocesses are killed and properly
+// logged. This should be called before the parent process running the
+// plugins exits.
+//
+// This must only be called _once_.
+func CleanupClients() {
+ // Set the killed to true so that we don't get unexpected panics
+ atomic.StoreUint32(&Killed, 1)
+
+ // Kill all the managed clients in parallel and use a WaitGroup
+ // to wait for them all to finish up.
+ var wg sync.WaitGroup
+ managedClientsLock.Lock()
+ for _, client := range managedClients {
+ wg.Add(1)
+
+ go func(client *Client) {
+ client.Kill()
+ wg.Done()
+ }(client)
+ }
+ managedClientsLock.Unlock()
+
+ log.Println("[DEBUG] plugin: waiting for all plugin processes to complete...")
+ wg.Wait()
+}
+
+// NewClient creates a new plugin client which manages the lifecycle of an
+// external plugin and gets the address for the RPC connection.
+//
+// The client must be cleaned up at some point by calling Kill(). If
+// the client is a managed client (created with `Managed: true` in the
+// ClientConfig), you can just call CleanupClients at the end of your
+// program and they will be properly cleaned.
+func NewClient(config *ClientConfig) (c *Client) {
+ if config.MinPort == 0 && config.MaxPort == 0 {
+ config.MinPort = 10000
+ config.MaxPort = 25000
+ }
+
+ if config.StartTimeout == 0 {
+ config.StartTimeout = 1 * time.Minute
+ }
+
+ if config.Stderr == nil {
+ config.Stderr = ioutil.Discard
+ }
+
+ if config.SyncStdout == nil {
+ config.SyncStdout = ioutil.Discard
+ }
+ if config.SyncStderr == nil {
+ config.SyncStderr = ioutil.Discard
+ }
+
+ c = &Client{config: config}
+ if config.Managed {
+ managedClientsLock.Lock()
+ managedClients = append(managedClients, c)
+ managedClientsLock.Unlock()
+ }
+
+ return
+}
+
+// Client returns an RPC client for the plugin.
+//
+// Subsequent calls to this will return the same RPC client.
+func (c *Client) Client() (*RPCClient, error) {
+ addr, err := c.Start()
+ if err != nil {
+ return nil, err
+ }
+
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.client != nil {
+ return c.client, nil
+ }
+
+ // Connect to the client
+ conn, err := net.Dial(addr.Network(), addr.String())
+ if err != nil {
+ return nil, err
+ }
+ if tcpConn, ok := conn.(*net.TCPConn); ok {
+ // Make sure to set keep alive so that the connection doesn't die
+ tcpConn.SetKeepAlive(true)
+ }
+
+ if c.config.TLSConfig != nil {
+ conn = tls.Client(conn, c.config.TLSConfig)
+ }
+
+ // Create the actual RPC client
+ c.client, err = NewRPCClient(conn, c.config.Plugins)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ // Begin the stream syncing so that stdin, out, err work properly
+ err = c.client.SyncStreams(
+ c.config.SyncStdout,
+ c.config.SyncStderr)
+ if err != nil {
+ c.client.Close()
+ c.client = nil
+ return nil, err
+ }
+
+ return c.client, nil
+}
+
+// Tells whether or not the underlying process has exited.
+func (c *Client) Exited() bool {
+ c.l.Lock()
+ defer c.l.Unlock()
+ return c.exited
+}
+
+// End the executing subprocess (if it is running) and perform any cleanup
+// tasks necessary such as capturing any remaining logs and so on.
+//
+// This method blocks until the process successfully exits.
+//
+// This method can safely be called multiple times.
+func (c *Client) Kill() {
+ // Grab a lock to read some private fields.
+ c.l.Lock()
+ process := c.process
+ addr := c.address
+ doneCh := c.doneLogging
+ c.l.Unlock()
+
+ // If there is no process, we never started anything. Nothing to kill.
+ if process == nil {
+ return
+ }
+
+ // We need to check for address here. It is possible that the plugin
+ // started (process != nil) but has no address (addr == nil) if the
+ // plugin failed at startup. If we do have an address, we need to close
+ // the plugin net connections.
+ graceful := false
+ if addr != nil {
+ // Close the client to cleanly exit the process.
+ client, err := c.Client()
+ if err == nil {
+ err = client.Close()
+
+ // If there is no error, then we attempt to wait for a graceful
+ // exit. If there was an error, we assume that graceful cleanup
+ // won't happen and just force kill.
+ graceful = err == nil
+ if err != nil {
+ // If there was an error just log it. We're going to force
+ // kill in a moment anyways.
+ log.Printf(
+ "[WARN] plugin: error closing client during Kill: %s", err)
+ }
+ }
+ }
+
+ // If we're attempting a graceful exit, then we wait for a short period
+ // of time to allow that to happen. To wait for this we just wait on the
+ // doneCh which would be closed if the process exits.
+ if graceful {
+ select {
+ case <-doneCh:
+ return
+ case <-time.After(250 * time.Millisecond):
+ }
+ }
+
+ // If graceful exiting failed, just kill it
+ process.Kill()
+
+ // Wait for the client to finish logging so we have a complete log
+ <-doneCh
+}
+
+// Starts the underlying subprocess, communicating with it to negotiate
+// a port for RPC connections, and returning the address to connect via RPC.
+//
+// This method is safe to call multiple times. Subsequent calls have no effect.
+// Once a client has been started once, it cannot be started again, even if
+// it was killed.
+func (c *Client) Start() (addr net.Addr, err error) {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.address != nil {
+ return c.address, nil
+ }
+
+	// If one of cmd or reattach isn't set, then it is an error. We wrap
+	// this in a {} for scoping reasons, hoping that the escape analysis
+	// will pop the stack here.
+ {
+ cmdSet := c.config.Cmd != nil
+ attachSet := c.config.Reattach != nil
+ secureSet := c.config.SecureConfig != nil
+ if cmdSet == attachSet {
+ return nil, fmt.Errorf("Only one of Cmd or Reattach must be set")
+ }
+
+ if secureSet && attachSet {
+ return nil, ErrSecureConfigAndReattach
+ }
+ }
+
+ // Create the logging channel for when we kill
+ c.doneLogging = make(chan struct{})
+
+ if c.config.Reattach != nil {
+ // Verify the process still exists. If not, then it is an error
+ p, err := os.FindProcess(c.config.Reattach.Pid)
+ if err != nil {
+ return nil, err
+ }
+
+ // Attempt to connect to the addr since on Unix systems FindProcess
+ // doesn't actually return an error if it can't find the process.
+ conn, err := net.Dial(
+ c.config.Reattach.Addr.Network(),
+ c.config.Reattach.Addr.String())
+ if err != nil {
+ p.Kill()
+ return nil, ErrProcessNotFound
+ }
+ conn.Close()
+
+ // Goroutine to mark exit status
+ go func(pid int) {
+ // Wait for the process to die
+ pidWait(pid)
+
+ // Log so we can see it
+ log.Printf("[DEBUG] plugin: reattached plugin process exited\n")
+
+ // Mark it
+ c.l.Lock()
+ defer c.l.Unlock()
+ c.exited = true
+
+ // Close the logging channel since that doesn't work on reattach
+ close(c.doneLogging)
+ }(p.Pid)
+
+ // Set the address and process
+ c.address = c.config.Reattach.Addr
+ c.process = p
+
+ return c.address, nil
+ }
+
+ env := []string{
+ fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue),
+ fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort),
+ fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort),
+ }
+
+ stdout_r, stdout_w := io.Pipe()
+ stderr_r, stderr_w := io.Pipe()
+
+ cmd := c.config.Cmd
+ cmd.Env = append(cmd.Env, os.Environ()...)
+ cmd.Env = append(cmd.Env, env...)
+ cmd.Stdin = os.Stdin
+ cmd.Stderr = stderr_w
+ cmd.Stdout = stdout_w
+
+ if c.config.SecureConfig != nil {
+ if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil {
+ return nil, fmt.Errorf("error verifying checksum: %s", err)
+ } else if !ok {
+ return nil, ErrChecksumsDoNotMatch
+ }
+ }
+
+ log.Printf("[DEBUG] plugin: starting plugin: %s %#v", cmd.Path, cmd.Args)
+ err = cmd.Start()
+ if err != nil {
+ return
+ }
+
+ // Set the process
+ c.process = cmd.Process
+
+ // Make sure the command is properly cleaned up if there is an error
+ defer func() {
+ r := recover()
+
+ if err != nil || r != nil {
+ cmd.Process.Kill()
+ }
+
+ if r != nil {
+ panic(r)
+ }
+ }()
+
+ // Start goroutine to wait for process to exit
+ exitCh := make(chan struct{})
+ go func() {
+ // Make sure we close the write end of our stderr/stdout so
+ // that the readers send EOF properly.
+ defer stderr_w.Close()
+ defer stdout_w.Close()
+
+ // Wait for the command to end.
+ cmd.Wait()
+
+		// Log and make sure to flush the logs right away
+ log.Printf("[DEBUG] plugin: %s: plugin process exited\n", cmd.Path)
+ os.Stderr.Sync()
+
+ // Mark that we exited
+ close(exitCh)
+
+ // Set that we exited, which takes a lock
+ c.l.Lock()
+ defer c.l.Unlock()
+ c.exited = true
+ }()
+
+ // Start goroutine that logs the stderr
+ go c.logStderr(stderr_r)
+
+ // Start a goroutine that is going to be reading the lines
+ // out of stdout
+ linesCh := make(chan []byte)
+ go func() {
+ defer close(linesCh)
+
+ buf := bufio.NewReader(stdout_r)
+ for {
+ line, err := buf.ReadBytes('\n')
+ if line != nil {
+ linesCh <- line
+ }
+
+ if err == io.EOF {
+ return
+ }
+ }
+ }()
+
+	// Make sure after we exit we keep draining lines from stdout forever
+	// so the writing goroutine doesn't block, since it is an io.Pipe.
+	defer func() {
+		go func() {
+			for range linesCh {
+			}
+		}()
+	}()
+
+ // Some channels for the next step
+ timeout := time.After(c.config.StartTimeout)
+
+ // Start looking for the address
+ log.Printf("[DEBUG] plugin: waiting for RPC address for: %s", cmd.Path)
+ select {
+ case <-timeout:
+ err = errors.New("timeout while waiting for plugin to start")
+ case <-exitCh:
+ err = errors.New("plugin exited before we could connect")
+ case lineBytes := <-linesCh:
+ // Trim the line and split by "|" in order to get the parts of
+ // the output.
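+		// The expected handshake line, printed by the plugin to stdout, is:
+		//   CORE-PROTOCOL-VERSION|APP-PROTOCOL-VERSION|NETWORK-TYPE|NETWORK-ADDR
+		// e.g. "1|2|unix|/tmp/plugin123" (illustrative values, inferred from
+		// the parsing below).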
+ line := strings.TrimSpace(string(lineBytes))
+ parts := strings.SplitN(line, "|", 4)
+ if len(parts) < 4 {
+ err = fmt.Errorf(
+ "Unrecognized remote plugin message: %s\n\n"+
+ "This usually means that the plugin is either invalid or simply\n"+
+ "needs to be recompiled to support the latest protocol.", line)
+ return
+ }
+
+ // Check the core protocol. Wrapped in a {} for scoping.
+ {
+ var coreProtocol int64
+ coreProtocol, err = strconv.ParseInt(parts[0], 10, 0)
+ if err != nil {
+ err = fmt.Errorf("Error parsing core protocol version: %s", err)
+ return
+ }
+
+ if int(coreProtocol) != CoreProtocolVersion {
+ err = fmt.Errorf("Incompatible core API version with plugin. "+
+ "Plugin version: %s, Ours: %d\n\n"+
+ "To fix this, the plugin usually only needs to be recompiled.\n"+
+ "Please report this to the plugin author.", parts[0], CoreProtocolVersion)
+ return
+ }
+ }
+
+ // Parse the protocol version
+ var protocol int64
+ protocol, err = strconv.ParseInt(parts[1], 10, 0)
+ if err != nil {
+ err = fmt.Errorf("Error parsing protocol version: %s", err)
+ return
+ }
+
+ // Test the API version
+ if uint(protocol) != c.config.ProtocolVersion {
+ err = fmt.Errorf("Incompatible API version with plugin. "+
+ "Plugin version: %s, Ours: %d", parts[1], c.config.ProtocolVersion)
+ return
+ }
+
+ switch parts[2] {
+ case "tcp":
+ addr, err = net.ResolveTCPAddr("tcp", parts[3])
+ case "unix":
+ addr, err = net.ResolveUnixAddr("unix", parts[3])
+ default:
+			err = fmt.Errorf("Unknown address type: %s", parts[2])
+ }
+ }
+
+ c.address = addr
+ return
+}
+
+// ReattachConfig returns the information that must be provided to NewClient
+// to reattach to the plugin process that this client started. This is
+// useful for plugins that detach from their parent process.
+//
+// If this returns nil then the process hasn't been started yet. Please
+// call Start or Client before calling this.
+func (c *Client) ReattachConfig() *ReattachConfig {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.address == nil {
+ return nil
+ }
+
+ if c.config.Cmd != nil && c.config.Cmd.Process == nil {
+ return nil
+ }
+
+ // If we connected via reattach, just return the information as-is
+ if c.config.Reattach != nil {
+ return c.config.Reattach
+ }
+
+ return &ReattachConfig{
+ Addr: c.address,
+ Pid: c.config.Cmd.Process.Pid,
+ }
+}
+
+func (c *Client) logStderr(r io.Reader) {
+ bufR := bufio.NewReader(r)
+ for {
+ line, err := bufR.ReadString('\n')
+ if line != "" {
+ c.config.Stderr.Write([]byte(line))
+
+ line = strings.TrimRightFunc(line, unicode.IsSpace)
+ log.Printf("[DEBUG] plugin: %s: %s", filepath.Base(c.config.Cmd.Path), line)
+ }
+
+ if err == io.EOF {
+ break
+ }
+ }
+
+ // Flag that we've completed logging for others
+ close(c.doneLogging)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go
new file mode 100644
index 00000000..d22c566e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/discover.go
@@ -0,0 +1,28 @@
+package plugin
+
+import (
+ "path/filepath"
+)
+
+// Discover discovers plugins that are in a given directory.
+//
+// The directory doesn't need to be absolute. For example, "." will work fine.
+//
+// This currently assumes any file matching the glob is a plugin.
+// In the future this may be smarter about checking that a file is
+// executable and so on.
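+//
+// For example (hypothetical glob and directory):
+//
+//   paths, err := Discover("myapp-plugin-*", "/usr/local/lib/myapp")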
+//
+// TODO: test
+func Discover(glob, dir string) ([]string, error) {
+ var err error
+
+ // Make the directory absolute if it isn't already
+ if !filepath.IsAbs(dir) {
+ dir, err = filepath.Abs(dir)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return filepath.Glob(filepath.Join(dir, glob))
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go
new file mode 100644
index 00000000..22a7baa6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/error.go
@@ -0,0 +1,24 @@
+package plugin
+
+// BasicError is a type that wraps error types so that they can be messaged
+// across RPC channels. Since "error" is an interface, we can't always
+// gob-encode the underlying structure. This is a valid error interface
+// implementer that we will push across.
+type BasicError struct {
+ Message string
+}
+
+// NewBasicError is used to create a BasicError.
+//
+// err is allowed to be nil.
+func NewBasicError(err error) *BasicError {
+ if err == nil {
+ return nil
+ }
+
+ return &BasicError{err.Error()}
+}
+
+func (e *BasicError) Error() string {
+ return e.Message
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go
new file mode 100644
index 00000000..01c45ad7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/mux_broker.go
@@ -0,0 +1,204 @@
+package plugin
+
+import (
+ "encoding/binary"
+ "fmt"
+ "log"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/hashicorp/yamux"
+)
+
+// MuxBroker is responsible for brokering multiplexed connections by unique ID.
+//
+// It is used by plugins to multiplex multiple RPC connections and data
+// streams on top of a single connection between the plugin process and the
+// host process.
+//
+// This allows a plugin to request a channel with a specific ID to connect to
+// or accept a connection from, and the broker handles the details of
+// holding these channels open while they're being negotiated.
+//
+// The Plugin interface has access to these for both Server and Client.
+// The broker can be used by either (optionally) to reserve and connect to
+// new multiplexed streams. This is useful for complex args and return values,
+// or anything else you might need a data stream for.
+type MuxBroker struct {
+ nextId uint32
+ session *yamux.Session
+ streams map[uint32]*muxBrokerPending
+
+ sync.Mutex
+}
+
+type muxBrokerPending struct {
+ ch chan net.Conn
+ doneCh chan struct{}
+}
+
+func newMuxBroker(s *yamux.Session) *MuxBroker {
+ return &MuxBroker{
+ session: s,
+ streams: make(map[uint32]*muxBrokerPending),
+ }
+}
+
+// Accept accepts a connection by ID.
+//
+// This should not be called multiple times with the same ID at one time.
+func (m *MuxBroker) Accept(id uint32) (net.Conn, error) {
+ var c net.Conn
+ p := m.getStream(id)
+ select {
+ case c = <-p.ch:
+ close(p.doneCh)
+ case <-time.After(5 * time.Second):
+ m.Lock()
+ defer m.Unlock()
+ delete(m.streams, id)
+
+ return nil, fmt.Errorf("timeout waiting for accept")
+ }
+
+ // Ack our connection
+ if err := binary.Write(c, binary.LittleEndian, id); err != nil {
+ c.Close()
+ return nil, err
+ }
+
+ return c, nil
+}
+
+// AcceptAndServe is used to accept a specific stream ID and immediately
+// serve an RPC server on that stream ID. This is used to easily serve
+// complex arguments.
+//
+// The served interface is always registered to the "Plugin" name.
+func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) {
+ conn, err := m.Accept(id)
+ if err != nil {
+ log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err)
+ return
+ }
+
+ serve(conn, "Plugin", v)
+}
+
+// Close closes the connection and all sub-connections.
+func (m *MuxBroker) Close() error {
+ return m.session.Close()
+}
+
+// Dial opens a connection by ID.
+func (m *MuxBroker) Dial(id uint32) (net.Conn, error) {
+ // Open the stream
+ stream, err := m.session.OpenStream()
+ if err != nil {
+ return nil, err
+ }
+
+ // Write the stream ID onto the wire.
+ if err := binary.Write(stream, binary.LittleEndian, id); err != nil {
+ stream.Close()
+ return nil, err
+ }
+
+ // Read the ack that we connected. Then we're off!
+ var ack uint32
+ if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil {
+ stream.Close()
+ return nil, err
+ }
+ if ack != id {
+ stream.Close()
+ return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id)
+ }
+
+ return stream, nil
+}
+
+// NextId returns a unique ID to use next.
+//
+// It is possible for very long-running plugin hosts to wrap this value,
+// though it would require a very large number of RPC calls. In practice
+// we've never seen it happen.
+func (m *MuxBroker) NextId() uint32 {
+ return atomic.AddUint32(&m.nextId, 1)
+}
+
+// Run starts the brokering and should be executed in a goroutine, since it
+// blocks forever, or until the session closes.
+//
+// Uses of MuxBroker never need to call this. It is called internally by
+// the plugin host/client.
+func (m *MuxBroker) Run() {
+ for {
+ stream, err := m.session.AcceptStream()
+ if err != nil {
+ // Once we receive an error, just exit
+ break
+ }
+
+ // Read the stream ID from the stream
+ var id uint32
+ if err := binary.Read(stream, binary.LittleEndian, &id); err != nil {
+ stream.Close()
+ continue
+ }
+
+ // Initialize the waiter
+ p := m.getStream(id)
+ select {
+ case p.ch <- stream:
+ default:
+ }
+
+ // Wait for a timeout
+ go m.timeoutWait(id, p)
+ }
+}
+
+func (m *MuxBroker) getStream(id uint32) *muxBrokerPending {
+ m.Lock()
+ defer m.Unlock()
+
+ p, ok := m.streams[id]
+ if ok {
+ return p
+ }
+
+ m.streams[id] = &muxBrokerPending{
+ ch: make(chan net.Conn, 1),
+ doneCh: make(chan struct{}),
+ }
+ return m.streams[id]
+}
+
+func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) {
+ // Wait for the stream to either be picked up and connected, or
+ // for a timeout.
+ timeout := false
+ select {
+ case <-p.doneCh:
+ case <-time.After(5 * time.Second):
+ timeout = true
+ }
+
+ m.Lock()
+ defer m.Unlock()
+
+ // Delete the stream so no one else can grab it
+ delete(m.streams, id)
+
+	// If we timed out, then check if we have a channel in the buffer,
+	// and if so, close it. The default case keeps this check non-blocking,
+	// so we never sit on an empty channel while holding the lock.
+	if timeout {
+		select {
+		case s := <-p.ch:
+			s.Close()
+		default:
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go
new file mode 100644
index 00000000..37c8fd65
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/plugin.go
@@ -0,0 +1,25 @@
+// The plugin package exposes functions and helpers for communicating to
+// plugins which are implemented as standalone binary applications.
+//
+// plugin.Client fully manages the lifecycle of executing the application,
+// connecting to it, and returning the RPC client for dispensing plugins.
+//
+// plugin.Serve fully manages listeners to expose an RPC server from a binary
+// that plugin.Client can connect to.
+package plugin
+
+import (
+ "net/rpc"
+)
+
+// Plugin is the interface that is implemented to serve/connect to an
+// interface implementation.
+type Plugin interface {
+ // Server should return the RPC server compatible struct to serve
+ // the methods that the Client calls over net/rpc.
+ Server(*MuxBroker) (interface{}, error)
+
+ // Client returns an interface implementation for the plugin you're
+ // serving that communicates to the server end of the plugin.
+ Client(*MuxBroker, *rpc.Client) (interface{}, error)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go
new file mode 100644
index 00000000..88c999a5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/process.go
@@ -0,0 +1,24 @@
+package plugin
+
+import (
+ "time"
+)
+
+// pidAlive checks whether a pid is alive.
+func pidAlive(pid int) bool {
+ return _pidAlive(pid)
+}
+
+// pidWait blocks for a process to exit.
+func pidWait(pid int) error {
+ ticker := time.NewTicker(1 * time.Second)
+ defer ticker.Stop()
+
+ for range ticker.C {
+ if !pidAlive(pid) {
+ break
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go
new file mode 100644
index 00000000..70ba546b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/process_posix.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package plugin
+
+import (
+ "os"
+ "syscall"
+)
+
+// _pidAlive tests whether a process is alive or not by sending it Signal 0,
+// since Go otherwise has no way to test this.
+func _pidAlive(pid int) bool {
+ proc, err := os.FindProcess(pid)
+ if err == nil {
+ err = proc.Signal(syscall.Signal(0))
+ }
+
+ return err == nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go
new file mode 100644
index 00000000..9f7b0180
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/process_windows.go
@@ -0,0 +1,29 @@
+package plugin
+
+import (
+ "syscall"
+)
+
+const (
+ // Weird name but matches the MSDN docs
+ exit_STILL_ACTIVE = 259
+
+ processDesiredAccess = syscall.STANDARD_RIGHTS_READ |
+ syscall.PROCESS_QUERY_INFORMATION |
+ syscall.SYNCHRONIZE
+)
+
+// _pidAlive tests whether a process is alive or not
+func _pidAlive(pid int) bool {
+ h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid))
+ if err != nil {
+ return false
+ }
+
+ var ec uint32
+ if e := syscall.GetExitCodeProcess(h, &ec); e != nil {
+ return false
+ }
+
+ return ec == exit_STILL_ACTIVE
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go
new file mode 100644
index 00000000..29f9bf06
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go
@@ -0,0 +1,123 @@
+package plugin
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "net/rpc"
+
+ "github.com/hashicorp/yamux"
+)
+
+// RPCClient connects to an RPCServer over net/rpc to dispense plugin types.
+type RPCClient struct {
+ broker *MuxBroker
+ control *rpc.Client
+ plugins map[string]Plugin
+
+ // These are the streams used for the various stdout/err overrides
+ stdout, stderr net.Conn
+}
+
+// NewRPCClient creates a client from an already-open connection-like value.
+// Dial is typically used instead.
+func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) {
+ // Create the yamux client so we can multiplex
+ mux, err := yamux.Client(conn, nil)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ // Connect to the control stream.
+ control, err := mux.Open()
+ if err != nil {
+ mux.Close()
+ return nil, err
+ }
+
+ // Connect stdout, stderr streams
+ stdstream := make([]net.Conn, 2)
+ for i := range stdstream {
+ stdstream[i], err = mux.Open()
+ if err != nil {
+ mux.Close()
+ return nil, err
+ }
+ }
+
+ // Create the broker and start it up
+ broker := newMuxBroker(mux)
+ go broker.Run()
+
+ // Build the client using our broker and control channel.
+ return &RPCClient{
+ broker: broker,
+ control: rpc.NewClient(control),
+ plugins: plugins,
+ stdout: stdstream[0],
+ stderr: stdstream[1],
+ }, nil
+}
+
+// SyncStreams should be called to enable syncing of stdout,
+// stderr with the plugin.
+//
+// This will return immediately and the syncing will continue to happen
+// in the background. You do not need to launch this in a goroutine itself.
+//
+// This should never be called multiple times.
+func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error {
+ go copyStream("stdout", stdout, c.stdout)
+ go copyStream("stderr", stderr, c.stderr)
+ return nil
+}
+
+// Close closes the connection. The client is no longer usable after this
+// is called.
+func (c *RPCClient) Close() error {
+ // Call the control channel and ask it to gracefully exit. If this
+ // errors, we save the error so we can still return it after trying
+ // to close the other channels anyway.
+ var empty struct{}
+ returnErr := c.control.Call("Control.Quit", true, &empty)
+
+ // Close the other streams we have
+ if err := c.control.Close(); err != nil {
+ return err
+ }
+ if err := c.stdout.Close(); err != nil {
+ return err
+ }
+ if err := c.stderr.Close(); err != nil {
+ return err
+ }
+ if err := c.broker.Close(); err != nil {
+ return err
+ }
+
+ // Return back the error we got from Control.Quit. This is very important
+ // since we MUST return non-nil error if this fails so that Client.Kill
+ // will properly try a process.Kill.
+ return returnErr
+}
+
+func (c *RPCClient) Dispense(name string) (interface{}, error) {
+ p, ok := c.plugins[name]
+ if !ok {
+ return nil, fmt.Errorf("unknown plugin type: %s", name)
+ }
+
+ var id uint32
+ if err := c.control.Call(
+ "Dispenser.Dispense", name, &id); err != nil {
+ return nil, err
+ }
+
+ conn, err := c.broker.Dial(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.Client(c.broker, rpc.NewClient(conn))
+}
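+
+// A minimal usage sketch (the "greeter" name and Greeter interface are
+// hypothetical): once an *RPCClient exists, callers dispense by name and
+// assert the result to the shared interface:
+//
+//	raw, err := client.Dispense("greeter")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	g := raw.(Greeter)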
diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go
new file mode 100644
index 00000000..3984dc89
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go
@@ -0,0 +1,185 @@
+package plugin
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/rpc"
+ "sync"
+
+ "github.com/hashicorp/yamux"
+)
+
+// RPCServer listens for network connections and then dispenses interface
+// implementations over net/rpc.
+//
+// Once the fields below are set, they shouldn't be read again directly
+// from the structure, since the server may be reading/writing them
+// concurrently.
+type RPCServer struct {
+ Plugins map[string]Plugin
+
+ // Stdout, Stderr are what this server will use instead of the
+ // normal stdout/stderr. Due to the multi-process nature of our
+ // plugin system, we can't use the normal process values, so we
+ // make our own custom ones that we pipe across.
+ Stdout io.Reader
+ Stderr io.Reader
+
+ // DoneCh should be set to a non-nil channel that will be closed
+ // when the control requests the RPC server to end.
+ DoneCh chan<- struct{}
+
+ lock sync.Mutex
+}
+
+// Accept accepts connections on a listener and serves requests for
+// each incoming connection. Accept blocks; the caller typically invokes
+// it in a go statement.
+func (s *RPCServer) Accept(lis net.Listener) {
+ for {
+ conn, err := lis.Accept()
+ if err != nil {
+ log.Printf("[ERR] plugin: plugin server: %s", err)
+ return
+ }
+
+ go s.ServeConn(conn)
+ }
+}
+
+// ServeConn runs a single connection.
+//
+// ServeConn blocks, serving the connection until the client hangs up.
+func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) {
+ // First create the yamux server to wrap this connection
+ mux, err := yamux.Server(conn, nil)
+ if err != nil {
+ conn.Close()
+ log.Printf("[ERR] plugin: error creating yamux server: %s", err)
+ return
+ }
+
+ // Accept the control connection
+ control, err := mux.Accept()
+ if err != nil {
+ mux.Close()
+ if err != io.EOF {
+ log.Printf("[ERR] plugin: error accepting control connection: %s", err)
+ }
+
+ return
+ }
+
+ // Connect the stdstreams (out, err)
+ stdstream := make([]net.Conn, 2)
+ for i := range stdstream {
+ stdstream[i], err = mux.Accept()
+ if err != nil {
+ mux.Close()
+ log.Printf("[ERR] plugin: accepting stream %d: %s", i, err)
+ return
+ }
+ }
+
+ // Copy std streams out to the proper place
+ go copyStream("stdout", stdstream[0], s.Stdout)
+ go copyStream("stderr", stdstream[1], s.Stderr)
+
+ // Create the broker and start it up
+ broker := newMuxBroker(mux)
+ go broker.Run()
+
+ // Use the control connection to build the dispenser and serve the
+ // connection.
+ server := rpc.NewServer()
+ server.RegisterName("Control", &controlServer{
+ server: s,
+ })
+ server.RegisterName("Dispenser", &dispenseServer{
+ broker: broker,
+ plugins: s.Plugins,
+ })
+ server.ServeConn(control)
+}
+
+// done is called internally by the control server to close DoneCh,
+// which the main process listens on in order to exit cleanly.
+func (s *RPCServer) done() {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ if s.DoneCh != nil {
+ close(s.DoneCh)
+ s.DoneCh = nil
+ }
+}
+
+// controlServer handles the control requests for the RPC server, such as
+// a graceful quit.
+type controlServer struct {
+ server *RPCServer
+}
+
+func (c *controlServer) Quit(
+ null bool, response *struct{}) error {
+ // End the server
+ c.server.done()
+
+ // Always set the empty response value
+ *response = struct{}{}
+
+ return nil
+}
+
+// dispenseServer dispenses various interface implementations for Terraform.
+type dispenseServer struct {
+ broker *MuxBroker
+ plugins map[string]Plugin
+}
+
+func (d *dispenseServer) Dispense(
+ name string, response *uint32) error {
+ // Find the function to create this implementation
+ p, ok := d.plugins[name]
+ if !ok {
+ return fmt.Errorf("unknown plugin type: %s", name)
+ }
+
+ // Create the implementation first so we know if there is an error.
+ impl, err := p.Server(d.broker)
+ if err != nil {
+ // We turn the error into an errors error so that it works across RPC
+ return errors.New(err.Error())
+ }
+
+ // Reserve an ID for our implementation
+ id := d.broker.NextId()
+ *response = id
+
+ // Run the rest in a goroutine since it can only happen once this RPC
+ // call returns. We wait for a connection for the plugin implementation
+ // and serve it.
+ go func() {
+ conn, err := d.broker.Accept(id)
+ if err != nil {
+ log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err)
+ return
+ }
+
+ serve(conn, "Plugin", impl)
+ }()
+
+ return nil
+}
+
+func serve(conn io.ReadWriteCloser, name string, v interface{}) {
+ server := rpc.NewServer()
+ if err := server.RegisterName(name, v); err != nil {
+ log.Printf("[ERR] go-plugin: plugin dispense error: %s", err)
+ return
+ }
+
+ server.ServeConn(conn)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go
new file mode 100644
index 00000000..782a4e11
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/server.go
@@ -0,0 +1,235 @@
+package plugin
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "os"
+ "os/signal"
+ "runtime"
+ "strconv"
+ "sync/atomic"
+)
+
+// CoreProtocolVersion is the ProtocolVersion of the plugin system itself.
+// We will increment this whenever we change any protocol behavior. This
+// will invalidate any prior plugins but will at least allow us to iterate
+// on the core in a safe way. We will do our best to do this very
+// infrequently.
+const CoreProtocolVersion = 1
+
+// HandshakeConfig is the configuration used by client and servers to
+// handshake before starting a plugin connection. This is embedded by
+// both ServeConfig and ClientConfig.
+//
+// In practice, the plugin host creates a HandshakeConfig that is exported
+// so that plugins can easily consume it.
+type HandshakeConfig struct {
+ // ProtocolVersion is the version that clients must match on to
+ // agree they can communicate. This should match the ProtocolVersion
+ // set on ClientConfig when using a plugin.
+ ProtocolVersion uint
+
+ // MagicCookieKey and value are used as a very basic verification
+ // that a plugin is intended to be launched. This is not a security
+ // measure, just a UX feature. If the magic cookie doesn't match,
+ // we show human-friendly output.
+ MagicCookieKey string
+ MagicCookieValue string
+}
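+
+// For illustration only (the key and value below are invented), a host and
+// its plugins typically share one exported handshake value:
+//
+//	var Handshake = HandshakeConfig{
+//		ProtocolVersion:  1,
+//		MagicCookieKey:   "EXAMPLE_PLUGIN",
+//		MagicCookieValue: "hello",
+//	}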
+
+// ServeConfig configures what sorts of plugins are served.
+type ServeConfig struct {
+ // HandshakeConfig is the configuration that must match the client's.
+ HandshakeConfig
+
+ // Plugins are the plugins that are served.
+ Plugins map[string]Plugin
+
+ // TLSProvider is a function that returns a configured tls.Config.
+ TLSProvider func() (*tls.Config, error)
+}
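+
+// As a sketch of how Serve (below) is typically wired up from a plugin's
+// main function (the "greeter" entry and GreeterPlugin type are
+// hypothetical, and Handshake refers to the value sketched above):
+//
+//	func main() {
+//		Serve(&ServeConfig{
+//			HandshakeConfig: Handshake,
+//			Plugins: map[string]Plugin{
+//				"greeter": &GreeterPlugin{},
+//			},
+//		})
+//	}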
+
+// Serve serves the plugins given by ServeConfig.
+//
+// Serve doesn't return until the plugin is done being executed. Any
+// errors will be written to the log.
+//
+// This is the method that plugins should call in their main() functions.
+func Serve(opts *ServeConfig) {
+ // Validate the handshake config
+ if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" {
+ fmt.Fprintf(os.Stderr,
+ "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+
+ "key or value was set. Please notify the plugin author and report\n"+
+ "this as a bug.\n")
+ os.Exit(1)
+ }
+
+ // First check the cookie
+ if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue {
+ fmt.Fprintf(os.Stderr,
+ "This binary is a plugin. These are not meant to be executed directly.\n"+
+ "Please execute the program that consumes these plugins, which will\n"+
+ "load any plugins automatically\n")
+ os.Exit(1)
+ }
+
+ // Logging goes to the original stderr
+ log.SetOutput(os.Stderr)
+
+ // Create our new stdout, stderr pipes. These will replace the process's
+ // stdout/stderr so that output can be carried across the stream boundary.
+ stdout_r, stdout_w, err := os.Pipe()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err)
+ os.Exit(1)
+ }
+ stderr_r, stderr_w, err := os.Pipe()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err)
+ os.Exit(1)
+ }
+
+ // Register a listener so we can accept a connection
+ listener, err := serverListener()
+ if err != nil {
+ log.Printf("[ERR] plugin: plugin init: %s", err)
+ return
+ }
+
+ if opts.TLSProvider != nil {
+ tlsConfig, err := opts.TLSProvider()
+ if err != nil {
+ log.Printf("[ERR] plugin: plugin tls init: %s", err)
+ return
+ }
+ listener = tls.NewListener(listener, tlsConfig)
+ }
+ defer listener.Close()
+
+ // Create the channel to tell us when we're done
+ doneCh := make(chan struct{})
+
+ // Create the RPC server to dispense
+ server := &RPCServer{
+ Plugins: opts.Plugins,
+ Stdout: stdout_r,
+ Stderr: stderr_r,
+ DoneCh: doneCh,
+ }
+
+ // Output the protocol versions, network type, and address to stdout so
+ // that the host process can connect to us.
+ log.Printf("[DEBUG] plugin: plugin address: %s %s\n",
+ listener.Addr().Network(), listener.Addr().String())
+ fmt.Printf("%d|%d|%s|%s\n",
+ CoreProtocolVersion,
+ opts.ProtocolVersion,
+ listener.Addr().Network(),
+ listener.Addr().String())
+ os.Stdout.Sync()
+
+ // Eat the interrupts
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, os.Interrupt)
+ go func() {
+ var count int32 = 0
+ for {
+ <-ch
+ newCount := atomic.AddInt32(&count, 1)
+ log.Printf(
+ "[DEBUG] plugin: received interrupt signal (count: %d). Ignoring.",
+ newCount)
+ }
+ }()
+
+ // Set our new out, err
+ os.Stdout = stdout_w
+ os.Stderr = stderr_w
+
+ // Serve
+ go server.Accept(listener)
+
+ // Wait for the graceful exit
+ <-doneCh
+}
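+
+// For reference, the single line written to stdout above is the handshake
+// the host parses; with the values in this file it looks like the
+// following (the socket path is invented for illustration):
+//
+//	1|1|unix|/tmp/plugin123456789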
+
+func serverListener() (net.Listener, error) {
+ if runtime.GOOS == "windows" {
+ return serverListener_tcp()
+ }
+
+ return serverListener_unix()
+}
+
+func serverListener_tcp() (net.Listener, error) {
+ minPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MIN_PORT"), 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ maxPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MAX_PORT"), 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ for port := minPort; port <= maxPort; port++ {
+ address := fmt.Sprintf("127.0.0.1:%d", port)
+ listener, err := net.Listen("tcp", address)
+ if err == nil {
+ return listener, nil
+ }
+ }
+
+ return nil, errors.New("Couldn't bind plugin TCP listener")
+}
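+
+// The port range above is communicated by the host process through the
+// environment; a host-side sketch with illustrative values:
+//
+//	cmd.Env = append(os.Environ(),
+//		"PLUGIN_MIN_PORT=10000",
+//		"PLUGIN_MAX_PORT=25000")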
+
+func serverListener_unix() (net.Listener, error) {
+ tf, err := ioutil.TempFile("", "plugin")
+ if err != nil {
+ return nil, err
+ }
+ path := tf.Name()
+
+ // Close the file and remove it because it has to not exist for
+ // the domain socket.
+ if err := tf.Close(); err != nil {
+ return nil, err
+ }
+ if err := os.Remove(path); err != nil {
+ return nil, err
+ }
+
+ l, err := net.Listen("unix", path)
+ if err != nil {
+ return nil, err
+ }
+
+ // Wrap the listener in rmListener so that the Unix domain socket file
+ // is removed on close.
+ return &rmListener{
+ Listener: l,
+ Path: path,
+ }, nil
+}
+
+// rmListener is an implementation of net.Listener that forwards most
+// calls to the listener but also removes a file as part of the close. We
+// use this to clean up the Unix domain socket on close.
+type rmListener struct {
+ net.Listener
+ Path string
+}
+
+func (l *rmListener) Close() error {
+ // Close the listener itself
+ if err := l.Listener.Close(); err != nil {
+ return err
+ }
+
+ // Remove the file
+ return os.Remove(l.Path)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go
new file mode 100644
index 00000000..033079ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/server_mux.go
@@ -0,0 +1,31 @@
+package plugin
+
+import (
+ "fmt"
+ "os"
+)
+
+// ServeMuxMap is the type that is used to configure ServeMux
+type ServeMuxMap map[string]*ServeConfig
+
+// ServeMux is like Serve, but serves multiple types of plugins determined
+// by the argument given on the command-line.
+//
+// Like Serve, this doesn't return until the plugin is done being executed.
+// Any errors are logged or output to stderr.
+func ServeMux(m ServeMuxMap) {
+ if len(os.Args) != 2 {
+ fmt.Fprintf(os.Stderr,
+ "Invoked improperly. This is an internal command that shouldn't\n"+
+ "be manually invoked.\n")
+ os.Exit(1)
+ }
+
+ opts, ok := m[os.Args[1]]
+ if !ok {
+ fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1])
+ os.Exit(1)
+ }
+
+ Serve(opts)
+}
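+
+// Illustrative call (the plugin name and types are invented); the single
+// command-line argument selects one ServeConfig from the map:
+//
+//	ServeMux(ServeMuxMap{
+//		"greeter": &ServeConfig{
+//			HandshakeConfig: Handshake,
+//			Plugins:         map[string]Plugin{"greeter": &GreeterPlugin{}},
+//		},
+//	})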
diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go
new file mode 100644
index 00000000..1d547aaa
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/stream.go
@@ -0,0 +1,18 @@
+package plugin
+
+import (
+ "io"
+ "log"
+)
+
+func copyStream(name string, dst io.Writer, src io.Reader) {
+ if src == nil {
+ panic(name + ": src is nil")
+ }
+ if dst == nil {
+ panic(name + ": dst is nil")
+ }
+ if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
+ log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go
new file mode 100644
index 00000000..9086a1b4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/testing.go
@@ -0,0 +1,76 @@
+package plugin
+
+import (
+ "bytes"
+ "net"
+ "net/rpc"
+ "testing"
+)
+
+// This file contains test helpers that can be used outside of this
+// package to make it easier to test plugins themselves.
+
+// TestConn is a helper function for returning a client and server
+// net.Conn connected to each other.
+func TestConn(t *testing.T) (net.Conn, net.Conn) {
+ // Listen to any local port. This listener will be closed
+ // after a single connection is established.
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Start a goroutine to accept our client connection
+ var serverConn net.Conn
+ doneCh := make(chan struct{})
+ go func() {
+ defer close(doneCh)
+ defer l.Close()
+ var err error
+ serverConn, err = l.Accept()
+ if err != nil {
+ // Fatalf must be called from the test goroutine, so report
+ // the failure with Errorf here instead.
+ t.Errorf("err: %s", err)
+ }
+ }()
+
+ // Connect to the server
+ clientConn, err := net.Dial("tcp", l.Addr().String())
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Wait for the server side to acknowledge it has connected
+ <-doneCh
+
+ return clientConn, serverConn
+}
+
+// TestRPCConn returns an RPC client and server connected to each other.
+func TestRPCConn(t *testing.T) (*rpc.Client, *rpc.Server) {
+ clientConn, serverConn := TestConn(t)
+
+ server := rpc.NewServer()
+ go server.ServeConn(serverConn)
+
+ client := rpc.NewClient(clientConn)
+ return client, server
+}
+
+// TestPluginRPCConn returns a plugin RPC client and server that are connected
+// together and configured.
+func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) {
+ // Create two net.Conns we can use to shuttle our control connection
+ clientConn, serverConn := TestConn(t)
+
+ // Start up the server
+ server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)}
+ go server.ServeConn(serverConn)
+
+ // Connect the client to the server
+ client, err := NewRPCClient(clientConn, ps)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ return client, server
+}
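+
+// In a plugin's own tests this is typically used along these lines (the
+// "greeter" name and GreeterPlugin type are hypothetical):
+//
+//	client, _ := TestPluginRPCConn(t, map[string]Plugin{
+//		"greeter": &GreeterPlugin{},
+//	})
+//	defer client.Close()
+//	raw, err := client.Dispense("greeter")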
diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE
new file mode 100644
index 00000000..e87a115e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md
new file mode 100644
index 00000000..02565c8c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/README.md
@@ -0,0 +1,8 @@
+# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid)
+
+Generates UUID-format strings using high-quality, purely random bytes. It can also parse UUID-format strings into their component bytes.
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid).
diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go
new file mode 100644
index 00000000..ff9364c4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/uuid.go
@@ -0,0 +1,65 @@
+package uuid
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "fmt"
+)
+
+// GenerateRandomBytes is used to generate random bytes of the given size.
+func GenerateRandomBytes(size int) ([]byte, error) {
+ buf := make([]byte, size)
+ if _, err := rand.Read(buf); err != nil {
+ return nil, fmt.Errorf("failed to read random bytes: %v", err)
+ }
+ return buf, nil
+}
+
+// GenerateUUID is used to generate a random UUID
+func GenerateUUID() (string, error) {
+ buf, err := GenerateRandomBytes(16)
+ if err != nil {
+ return "", err
+ }
+ return FormatUUID(buf)
+}
+
+// FormatUUID formats a 16-byte slice as a canonical UUID string.
+func FormatUUID(buf []byte) (string, error) {
+ if len(buf) != 16 {
+ return "", fmt.Errorf("wrong length byte slice (%d)", len(buf))
+ }
+
+ return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
+ buf[0:4],
+ buf[4:6],
+ buf[6:8],
+ buf[8:10],
+ buf[10:16]), nil
+}
+
+// ParseUUID parses a canonical UUID string into its 16 raw bytes.
+func ParseUUID(uuid string) ([]byte, error) {
+ if len(uuid) != 36 {
+ return nil, fmt.Errorf("uuid string is wrong length")
+ }
+
+ hyph := []byte("-")
+
+ if uuid[8] != hyph[0] ||
+ uuid[13] != hyph[0] ||
+ uuid[18] != hyph[0] ||
+ uuid[23] != hyph[0] {
+ return nil, fmt.Errorf("uuid is improperly formatted")
+ }
+
+ hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36]
+
+ ret, err := hex.DecodeString(hexStr)
+ if err != nil {
+ return nil, err
+ }
+ if len(ret) != 16 {
+ return nil, fmt.Errorf("decoded hex is the wrong length")
+ }
+
+ return ret, nil
+}
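+
+// Round-trip sketch: the three functions above compose as inverses.
+//
+//	id, _ := GenerateUUID() // 36-character string
+//	b, _ := ParseUUID(id)   // 16 raw bytes
+//	s, _ := FormatUUID(b)   // s == id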
diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE
new file mode 100644
index 00000000..c33dcc7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md
new file mode 100644
index 00000000..6f3a15ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/README.md
@@ -0,0 +1,65 @@
+# Versioning Library for Go
+[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version)
+
+go-version is a library for parsing versions and version constraints,
+and verifying versions against a set of constraints. go-version
+can sort a collection of versions, handle prerelease/beta
+versions, increment versions, and more.
+
+Versions used with go-version must follow [SemVer](http://semver.org/).
+
+## Installation and Usage
+
+Package documentation can be found on
+[GoDoc](http://godoc.org/github.com/hashicorp/go-version).
+
+Installation can be done with a normal `go get`:
+
+```
+$ go get github.com/hashicorp/go-version
+```
+
+#### Version Parsing and Comparison
+
+```go
+v1, err := version.NewVersion("1.2")
+v2, err := version.NewVersion("1.5+metadata")
+
+// Comparison example. There is also GreaterThan, Equal, and just
+// a simple Compare that returns an int allowing easy >=, <=, etc.
+if v1.LessThan(v2) {
+ fmt.Printf("%s is less than %s", v1, v2)
+}
+```
+
+#### Version Constraints
+
+```go
+v1, err := version.NewVersion("1.2")
+
+// Constraints example.
+constraints, err := version.NewConstraint(">= 1.0, < 1.4")
+if constraints.Check(v1) {
+ fmt.Printf("%s satisfies constraints %s", v1, constraints)
+}
+```
+
+#### Version Sorting
+
+```go
+versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"}
+versions := make([]*version.Version, len(versionsRaw))
+for i, raw := range versionsRaw {
+ v, _ := version.NewVersion(raw)
+ versions[i] = v
+}
+
+// After this, the versions are properly sorted
+sort.Sort(version.Collection(versions))
+```
+
+## Issues and Contributing
+
+If you find an issue with this library, please report an issue. If you'd
+like, we welcome any contributions. Fork this library and submit a pull
+request.
diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go
new file mode 100644
index 00000000..8c73df06
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/constraint.go
@@ -0,0 +1,178 @@
+package version
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// Constraint represents a single constraint for a version, such as
+// ">= 1.0".
+type Constraint struct {
+ f constraintFunc
+ check *Version
+ original string
+}
+
+// Constraints is a slice of constraints. We make a custom type so that
+// we can add methods to it.
+type Constraints []*Constraint
+
+type constraintFunc func(v, c *Version) bool
+
+var constraintOperators map[string]constraintFunc
+
+var constraintRegexp *regexp.Regexp
+
+func init() {
+ constraintOperators = map[string]constraintFunc{
+ "": constraintEqual,
+ "=": constraintEqual,
+ "!=": constraintNotEqual,
+ ">": constraintGreaterThan,
+ "<": constraintLessThan,
+ ">=": constraintGreaterThanEqual,
+ "<=": constraintLessThanEqual,
+ "~>": constraintPessimistic,
+ }
+
+ ops := make([]string, 0, len(constraintOperators))
+ for k := range constraintOperators {
+ ops = append(ops, regexp.QuoteMeta(k))
+ }
+
+ constraintRegexp = regexp.MustCompile(fmt.Sprintf(
+ `^\s*(%s)\s*(%s)\s*$`,
+ strings.Join(ops, "|"),
+ VersionRegexpRaw))
+}
+
+// NewConstraint will parse one or more constraints from the given
+// constraint string. The string must be a comma-separated list of
+// constraints.
+func NewConstraint(v string) (Constraints, error) {
+ vs := strings.Split(v, ",")
+ result := make([]*Constraint, len(vs))
+ for i, single := range vs {
+ c, err := parseSingle(single)
+ if err != nil {
+ return nil, err
+ }
+
+ result[i] = c
+ }
+
+ return Constraints(result), nil
+}
+
+// Check tests if a version satisfies all the constraints.
+func (cs Constraints) Check(v *Version) bool {
+ for _, c := range cs {
+ if !c.Check(v) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// String returns the string format of the constraints.
+func (cs Constraints) String() string {
+ csStr := make([]string, len(cs))
+ for i, c := range cs {
+ csStr[i] = c.String()
+ }
+
+ return strings.Join(csStr, ",")
+}
+
+// Check tests if the given version satisfies the constraint.
+func (c *Constraint) Check(v *Version) bool {
+ return c.f(v, c.check)
+}
+
+func (c *Constraint) String() string {
+ return c.original
+}
+
+func parseSingle(v string) (*Constraint, error) {
+ matches := constraintRegexp.FindStringSubmatch(v)
+ if matches == nil {
+ return nil, fmt.Errorf("Malformed constraint: %s", v)
+ }
+
+ check, err := NewVersion(matches[2])
+ if err != nil {
+ return nil, err
+ }
+
+ return &Constraint{
+ f: constraintOperators[matches[1]],
+ check: check,
+ original: v,
+ }, nil
+}
+
+//-------------------------------------------------------------------
+// Constraint functions
+//-------------------------------------------------------------------
+
+func constraintEqual(v, c *Version) bool {
+ return v.Equal(c)
+}
+
+func constraintNotEqual(v, c *Version) bool {
+ return !v.Equal(c)
+}
+
+func constraintGreaterThan(v, c *Version) bool {
+ return v.Compare(c) == 1
+}
+
+func constraintLessThan(v, c *Version) bool {
+ return v.Compare(c) == -1
+}
+
+func constraintGreaterThanEqual(v, c *Version) bool {
+ return v.Compare(c) >= 0
+}
+
+func constraintLessThanEqual(v, c *Version) bool {
+ return v.Compare(c) <= 0
+}
+
+func constraintPessimistic(v, c *Version) bool {
+ // If the version being checked is naturally less than the constraint, then there
+ // is no way for the version to be valid against the constraint
+ if v.LessThan(c) {
+ return false
+ }
+ // We'll use this more than once, so grab the length now so it's a little cleaner
+ // to write the later checks
+ cs := len(c.segments)
+
+ // If the version being checked has less specificity than the constraint, then there
+ // is no way for the version to be valid against the constraint
+ if cs > len(v.segments) {
+ return false
+ }
+
+	// Check the segments in the constraint against those in the version. If the version
+	// being checked, at any point, does not have the same values in each index of the
+	// constraint's segments, then it cannot be valid against the constraint.
+ for i := 0; i < c.si-1; i++ {
+ if v.segments[i] != c.segments[i] {
+ return false
+ }
+ }
+
+	// Check the last segment in the constraint. If the version segment at
+	// this index is less than the constraint's segment at this index, then it
+	// cannot be valid against the constraint.
+ if c.segments[cs-1] > v.segments[cs-1] {
+ return false
+ }
+
+ // If nothing has rejected the version by now, it's valid
+ return true
+}
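
The pessimistic operator `~>` implemented above is not shown in the README: it pins every constraint segment except the last, which may only grow. A minimal sketch of that behavior, again assuming the canonical import path:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-version"
)

func main() {
	// "~> 1.2.3" accepts >= 1.2.3 while the 1.2 prefix stays fixed, i.e. < 1.3.0.
	constraints, err := version.NewConstraint("~> 1.2.3")
	if err != nil {
		panic(err)
	}

	for _, raw := range []string{"1.2.3", "1.2.9", "1.3.0"} {
		v := version.Must(version.NewVersion(raw))
		fmt.Printf("%s: %v\n", v, constraints.Check(v)) // true, true, false
	}
}
```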
diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go
new file mode 100644
index 00000000..dfe509ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/version.go
@@ -0,0 +1,322 @@
+package version
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// The compiled regular expression used to test the validity of a version.
+var versionRegexp *regexp.Regexp
+
+// The raw regular expression string used for testing the validity
+// of a version.
+const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
+ `(-?([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+ `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+ `?`
+
+// Version represents a single version.
+type Version struct {
+ metadata string
+ pre string
+ segments []int64
+ si int
+}
+
+func init() {
+ versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
+}
+
+// NewVersion parses the given version and returns a new
+// Version.
+func NewVersion(v string) (*Version, error) {
+ matches := versionRegexp.FindStringSubmatch(v)
+ if matches == nil {
+ return nil, fmt.Errorf("Malformed version: %s", v)
+ }
+ segmentsStr := strings.Split(matches[1], ".")
+ segments := make([]int64, len(segmentsStr))
+ si := 0
+ for i, str := range segmentsStr {
+ val, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing version: %s", err)
+ }
+
+ segments[i] = int64(val)
+ si++
+ }
+
+	// Even though we could support more than three segments, if we
+	// got fewer than three, pad with 0s. This covers the basic default
+	// use case of semver, which is MAJOR.MINOR.PATCH at the minimum.
+ for i := len(segments); i < 3; i++ {
+ segments = append(segments, 0)
+ }
+
+ return &Version{
+ metadata: matches[7],
+ pre: matches[4],
+ segments: segments,
+ si: si,
+ }, nil
+}
+
+// Must is a helper that wraps a call to a function returning (*Version, error)
+// and panics if error is non-nil.
+func Must(v *Version, err error) *Version {
+ if err != nil {
+ panic(err)
+ }
+
+ return v
+}
+
+// Compare compares this version to another version. This
+// returns -1, 0, or 1 if this version is smaller, equal,
+// or larger than the other version, respectively.
+//
+// If you want boolean results, use the LessThan, Equal,
+// or GreaterThan methods.
+func (v *Version) Compare(other *Version) int {
+ // A quick, efficient equality check
+ if v.String() == other.String() {
+ return 0
+ }
+
+ segmentsSelf := v.Segments64()
+ segmentsOther := other.Segments64()
+
+ // If the segments are the same, we must compare on prerelease info
+ if reflect.DeepEqual(segmentsSelf, segmentsOther) {
+ preSelf := v.Prerelease()
+ preOther := other.Prerelease()
+ if preSelf == "" && preOther == "" {
+ return 0
+ }
+ if preSelf == "" {
+ return 1
+ }
+ if preOther == "" {
+ return -1
+ }
+
+ return comparePrereleases(preSelf, preOther)
+ }
+
+ // Get the highest specificity (hS), or if they're equal, just use segmentSelf length
+ lenSelf := len(segmentsSelf)
+ lenOther := len(segmentsOther)
+ hS := lenSelf
+ if lenSelf < lenOther {
+ hS = lenOther
+ }
+ // Compare the segments
+ // Because a constraint could have more/less specificity than the version it's
+ // checking, we need to account for a lopsided or jagged comparison
+ for i := 0; i < hS; i++ {
+ if i > lenSelf-1 {
+ // This means Self had the lower specificity
+ // Check to see if the remaining segments in Other are all zeros
+ if !allZero(segmentsOther[i:]) {
+ // if not, it means that Other has to be greater than Self
+ return -1
+ }
+ break
+ } else if i > lenOther-1 {
+ // this means Other had the lower specificity
+ // Check to see if the remaining segments in Self are all zeros -
+ if !allZero(segmentsSelf[i:]) {
+ //if not, it means that Self has to be greater than Other
+ return 1
+ }
+ break
+ }
+ lhs := segmentsSelf[i]
+ rhs := segmentsOther[i]
+ if lhs == rhs {
+ continue
+ } else if lhs < rhs {
+ return -1
+ }
+	// Otherwise rhs was > lhs; they're not equal
+ return 1
+ }
+
+ // if we got this far, they're equal
+ return 0
+}
+
+func allZero(segs []int64) bool {
+ for _, s := range segs {
+ if s != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func comparePart(preSelf string, preOther string) int {
+ if preSelf == preOther {
+ return 0
+ }
+
+ selfNumeric := true
+ _, err := strconv.ParseInt(preSelf, 10, 64)
+ if err != nil {
+ selfNumeric = false
+ }
+
+ otherNumeric := true
+ _, err = strconv.ParseInt(preOther, 10, 64)
+ if err != nil {
+ otherNumeric = false
+ }
+
+ // if a part is empty, we use the other to decide
+ if preSelf == "" {
+ if otherNumeric {
+ return -1
+ }
+ return 1
+ }
+
+ if preOther == "" {
+ if selfNumeric {
+ return 1
+ }
+ return -1
+ }
+
+ if selfNumeric && !otherNumeric {
+ return -1
+ } else if !selfNumeric && otherNumeric {
+ return 1
+ } else if preSelf > preOther {
+ return 1
+ }
+
+ return -1
+}
+
+func comparePrereleases(v string, other string) int {
+ // the same pre release!
+ if v == other {
+ return 0
+ }
+
+	// split both prereleases to analyze their parts
+ selfPreReleaseMeta := strings.Split(v, ".")
+ otherPreReleaseMeta := strings.Split(other, ".")
+
+ selfPreReleaseLen := len(selfPreReleaseMeta)
+ otherPreReleaseLen := len(otherPreReleaseMeta)
+
+ biggestLen := otherPreReleaseLen
+ if selfPreReleaseLen > otherPreReleaseLen {
+ biggestLen = selfPreReleaseLen
+ }
+
+ // loop for parts to find the first difference
+ for i := 0; i < biggestLen; i = i + 1 {
+ partSelfPre := ""
+ if i < selfPreReleaseLen {
+ partSelfPre = selfPreReleaseMeta[i]
+ }
+
+ partOtherPre := ""
+ if i < otherPreReleaseLen {
+ partOtherPre = otherPreReleaseMeta[i]
+ }
+
+ compare := comparePart(partSelfPre, partOtherPre)
+		// if the parts are equal, continue the loop
+ if compare != 0 {
+ return compare
+ }
+ }
+
+ return 0
+}
+
+// Equal tests if two versions are equal.
+func (v *Version) Equal(o *Version) bool {
+ return v.Compare(o) == 0
+}
+
+// GreaterThan tests if this version is greater than another version.
+func (v *Version) GreaterThan(o *Version) bool {
+ return v.Compare(o) > 0
+}
+
+// LessThan tests if this version is less than another version.
+func (v *Version) LessThan(o *Version) bool {
+ return v.Compare(o) < 0
+}
+
+// Metadata returns any metadata that was part of the version
+// string.
+//
+// Metadata is anything that comes after the "+" in the version.
+// For example, with "1.2.3+beta", the metadata is "beta".
+func (v *Version) Metadata() string {
+ return v.metadata
+}
+
+// Prerelease returns any prerelease data that is part of the version,
+// or blank if there is no prerelease data.
+//
+// Prerelease information is anything that comes after the "-" in the
+// version (but before any metadata). For example, with "1.2.3-beta",
+// the prerelease information is "beta".
+func (v *Version) Prerelease() string {
+ return v.pre
+}
+
+// Segments returns the numeric segments of the version as a slice of ints.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments() []int {
+ segmentSlice := make([]int, len(v.segments))
+ for i, v := range v.segments {
+ segmentSlice[i] = int(v)
+ }
+ return segmentSlice
+}
+
+// Segments64 returns the numeric segments of the version as a slice of int64s.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments64() []int64 {
+ return v.segments
+}
+
+// String returns the full version string including pre-release
+// and metadata information.
+func (v *Version) String() string {
+ var buf bytes.Buffer
+ fmtParts := make([]string, len(v.segments))
+ for i, s := range v.segments {
+		// FormatInt cannot fail here since the segments were parsed as integers
+ str := strconv.FormatInt(s, 10)
+ fmtParts[i] = str
+ }
+	fmt.Fprint(&buf, strings.Join(fmtParts, "."))
+ if v.pre != "" {
+ fmt.Fprintf(&buf, "-%s", v.pre)
+ }
+ if v.metadata != "" {
+ fmt.Fprintf(&buf, "+%s", v.metadata)
+ }
+
+ return buf.String()
+}
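
Compare's jagged-length branch above (the allZero checks) makes trailing zero segments insignificant, while any non-zero extra segment decides the ordering. A small illustration, assuming the canonical import path:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-version"
)

func main() {
	a := version.Must(version.NewVersion("1.2"))     // padded to 1.2.0
	b := version.Must(version.NewVersion("1.2.0.0")) // four segments
	c := version.Must(version.NewVersion("1.2.0.1"))

	fmt.Println(a.Compare(b)) // 0: trailing zeros compare equal
	fmt.Println(a.Compare(c)) // -1: a non-zero extra segment wins
}
```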
diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go
new file mode 100644
index 00000000..cc888d43
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/version_collection.go
@@ -0,0 +1,17 @@
+package version
+
+// Collection is a type that implements the sort.Interface interface
+// so that versions can be sorted.
+type Collection []*Version
+
+func (v Collection) Len() int {
+ return len(v)
+}
+
+func (v Collection) Less(i, j int) bool {
+ return v[i].LessThan(v[j])
+}
+
+func (v Collection) Swap(i, j int) {
+ v[i], v[j] = v[j], v[i]
+}
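
Because Collection implements sort.Interface, it composes with the standard library's helpers; for example, sort.Reverse yields a descending order. A brief sketch:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/hashicorp/go-version"
)

func main() {
	raw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"}
	versions := make([]*version.Version, len(raw))
	for i, r := range raw {
		versions[i] = version.Must(version.NewVersion(r))
	}

	// Wrap the Collection to sort newest-first instead of oldest-first.
	sort.Sort(sort.Reverse(version.Collection(versions)))
	fmt.Println(versions) // [2.0.0 1.4.0 1.4.0-beta 1.1.0 0.7.1]
}
```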
diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE
new file mode 100644
index 00000000..c33dcc7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md
new file mode 100644
index 00000000..c8223326
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/README.md
@@ -0,0 +1,125 @@
+# HCL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
+
+HCL (HashiCorp Configuration Language) is a configuration language built
+by HashiCorp. The goal of HCL is to build a structured configuration language
+that is both human- and machine-friendly for use with command-line tools,
+specifically targeted towards DevOps tools, servers, etc.
+
+HCL is also fully JSON compatible. That is, JSON can be used as completely
+valid input to a system expecting HCL. This helps make systems built on HCL
+interoperable with other systems.
+
+HCL is heavily inspired by
+[libucl](https://github.com/vstakhov/libucl),
+nginx configuration, and other similar formats.
+
+## Why?
+
+A common question when first encountering HCL is: why not
+JSON, YAML, etc.?
+
+Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
+used a variety of configuration languages, ranging from full programming
+languages such as Ruby to pure data structure languages such as JSON. What we
+learned is that some people wanted human-friendly configuration languages
+and some people wanted machine-friendly languages.
+
+JSON strikes a nice balance here, but is fairly verbose and, most
+importantly, doesn't support comments. With YAML, we found that beginners
+had a really hard time determining what the actual structure was, and
+ended up guessing more often than not whether to use a hyphen, colon, etc.
+in order to represent some configuration key.
+
+Full programming languages such as Ruby enable complex behavior
+a configuration language shouldn't usually allow, and also force
+people to learn some subset of Ruby.
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+ * Single line comments start with `#` or `//`
+
+ * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+ are not allowed. A multi-line comment (also known as a block comment)
+ terminates at the first `*/` found.
+
+ * Values are assigned with the syntax `key = value` (whitespace doesn't
+ matter). The value can be any primitive: a string, number, boolean,
+ object, or list.
+
+ * Strings are double-quoted and can contain any UTF-8 characters.
+ Example: `"Hello, World"`
+
+ * Multi-line strings start with `<<EOF` at the end of a line, and end
+ with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)).
+ Any text may be used in place of `EOF`. Example:
+```
+<<FOO
+hello
+world
+FOO
+```
+
+  * Numbers are assumed to be base 10. If you prefix a number with `0x`,
+    it is treated as hexadecimal. If it is prefixed with `0`, it is
+    treated as octal. Numbers can be in scientific notation: "1e10".
+
+ * Boolean values: `true`, `false`
+
+  * Arrays can be made by wrapping values in `[]`. Example:
+ `["foo", "bar", 42]`. Arrays can contain primitives,
+ other arrays, and objects. As an alternative, lists
+ of objects can be created with repeated blocks, using
+ this structure:
+
+ ```hcl
+ service {
+ key = "value"
+ }
+
+ service {
+ key = "value"
+ }
+ ```
+
+Objects and nested objects are created using the structure shown below:
+
+```
+variable "ami" {
+ description = "the AMI to use"
+}
+```
+This would be equivalent to the following JSON:
+``` json
+{
+ "variable": {
+ "ami": {
+ "description": "the AMI to use"
+ }
+ }
+}
+```
+
+## Thanks
+
+Thanks to:
+
+  * [@vstakhov](https://github.com/vstakhov) - The original libucl parser
+    and syntax that HCL was based on.
+
+ * [@fatih](https://github.com/fatih) - The rewritten HCL parser
+ in pure Go (no goyacc) and support for a printer.
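
Before the decoder source below, a minimal sketch of the struct-tag decoding it implements; the Config type and its field names here are illustrative, not from the library:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

// Config is a hypothetical target type; the "hcl" tags map HCL keys to fields.
type Config struct {
	Region  string `hcl:"region"`
	Retries int    `hcl:"retries"`
	Verbose bool   `hcl:"verbose"`
}

func main() {
	input := `
region  = "us-east-1"
retries = 3
verbose = true
`
	var cfg Config
	if err := hcl.Decode(&cfg, input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Region:us-east-1 Retries:3 Verbose:true}
}
```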
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644
index 00000000..0b39c1b9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -0,0 +1,724 @@
+package hcl
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/parser"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// tagName is the struct field tag used to configure HCL decoding.
+const tagName = "hcl"
+
+var (
+ // nodeType holds a reference to the type of ast.Node
+ nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+ root, err := parse(bs)
+ if err != nil {
+ return err
+ }
+
+ return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+ obj, err := Parse(in)
+ if err != nil {
+ return err
+ }
+
+ return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
+func DecodeObject(out interface{}, n ast.Node) error {
+ val := reflect.ValueOf(out)
+ if val.Kind() != reflect.Ptr {
+ return errors.New("result must be a pointer")
+ }
+
+ // If we have the file, we really decode the root node
+ if f, ok := n.(*ast.File); ok {
+ n = f.Node
+ }
+
+ var d decoder
+ return d.decode("root", n, val.Elem())
+}
+
+type decoder struct {
+ stack []reflect.Kind
+}
+
+func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
+ k := result
+
+ // If we have an interface with a valid value, we use that
+ // for the check.
+ if result.Kind() == reflect.Interface {
+ elem := result.Elem()
+ if elem.IsValid() {
+ k = elem
+ }
+ }
+
+ // Push current onto stack unless it is an interface.
+ if k.Kind() != reflect.Interface {
+ d.stack = append(d.stack, k.Kind())
+
+ // Schedule a pop
+ defer func() {
+ d.stack = d.stack[:len(d.stack)-1]
+ }()
+ }
+
+ switch k.Kind() {
+ case reflect.Bool:
+ return d.decodeBool(name, node, result)
+ case reflect.Float64:
+ return d.decodeFloat(name, node, result)
+ case reflect.Int, reflect.Int32, reflect.Int64:
+ return d.decodeInt(name, node, result)
+ case reflect.Interface:
+ // When we see an interface, we make our own thing
+ return d.decodeInterface(name, node, result)
+ case reflect.Map:
+ return d.decodeMap(name, node, result)
+ case reflect.Ptr:
+ return d.decodePtr(name, node, result)
+ case reflect.Slice:
+ return d.decodeSlice(name, node, result)
+ case reflect.String:
+ return d.decodeString(name, node, result)
+ case reflect.Struct:
+ return d.decodeStruct(name, node, result)
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
+ }
+ }
+}
+
+func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ if n.Token.Type == token.BOOL {
+ v, err := strconv.ParseBool(n.Token.Text)
+ if err != nil {
+ return err
+ }
+
+ result.Set(reflect.ValueOf(v))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ if n.Token.Type == token.FLOAT {
+ v, err := strconv.ParseFloat(n.Token.Text, 64)
+ if err != nil {
+ return err
+ }
+
+ result.Set(reflect.ValueOf(v))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.NUMBER:
+ v, err := strconv.ParseInt(n.Token.Text, 0, 0)
+ if err != nil {
+ return err
+ }
+
+ if result.Kind() == reflect.Interface {
+ result.Set(reflect.ValueOf(int(v)))
+ } else {
+ result.SetInt(v)
+ }
+ return nil
+ case token.STRING:
+ v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
+ if err != nil {
+ return err
+ }
+
+ if result.Kind() == reflect.Interface {
+ result.Set(reflect.ValueOf(int(v)))
+ } else {
+ result.SetInt(v)
+ }
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
+ // When we see an ast.Node, we retain the value to enable deferred decoding.
+ // Very useful in situations where we want to preserve ast.Node information
+ // like Pos
+ if result.Type() == nodeType && result.CanSet() {
+ result.Set(reflect.ValueOf(node))
+ return nil
+ }
+
+ var set reflect.Value
+ redecode := true
+
+ // For testing types, ObjectType should just be treated as a list. We
+ // set this to a temporary var because we want to pass in the real node.
+ testNode := node
+ if ot, ok := node.(*ast.ObjectType); ok {
+ testNode = ot.List
+ }
+
+ switch n := testNode.(type) {
+ case *ast.ObjectList:
+ // If we're at the root or we're directly within a slice, then we
+ // decode objects into map[string]interface{}, otherwise we decode
+ // them into lists.
+ if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+ var temp map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeMap(
+ reflect.MapOf(
+ reflect.TypeOf(""),
+ tempVal.Type().Elem()))
+
+ set = result
+ } else {
+ var temp []map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
+ set = result
+ }
+ case *ast.ObjectType:
+ // If we're at the root or we're directly within a slice, then we
+ // decode objects into map[string]interface{}, otherwise we decode
+ // them into lists.
+ if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+ var temp map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeMap(
+ reflect.MapOf(
+ reflect.TypeOf(""),
+ tempVal.Type().Elem()))
+
+ set = result
+ } else {
+ var temp []map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
+ set = result
+ }
+ case *ast.ListType:
+ var temp []interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
+ set = result
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.BOOL:
+ var result bool
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.FLOAT:
+ var result float64
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.NUMBER:
+ var result int
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.STRING, token.HEREDOC:
+ set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
+ }
+ }
+ default:
+ return fmt.Errorf(
+ "%s: cannot decode into interface: %T",
+ name, node)
+ }
+
+	// Set the result to what it's supposed to be, then reset
+ // result so we don't reflect into this method anymore.
+ result.Set(set)
+
+ if redecode {
+ // Revisit the node so that we can use the newly instantiated
+ // thing and populate it.
+ if err := d.decode(name, node, result); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
+ if item, ok := node.(*ast.ObjectItem); ok {
+ node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+ }
+
+ if ot, ok := node.(*ast.ObjectType); ok {
+ node = ot.List
+ }
+
+ n, ok := node.(*ast.ObjectList)
+ if !ok {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
+ }
+ }
+
+ // If we have an interface, then we can address the interface,
+ // but not the slice itself, so get the element but set the interface
+ set := result
+ if result.Kind() == reflect.Interface {
+ result = result.Elem()
+ }
+
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ resultKeyType := resultType.Key()
+ if resultKeyType.Kind() != reflect.String {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: map must have string keys", name),
+ }
+ }
+
+ // Make a map if it is nil
+ resultMap := result
+ if result.IsNil() {
+ resultMap = reflect.MakeMap(
+ reflect.MapOf(resultKeyType, resultElemType))
+ }
+
+ // Go through each element and decode it.
+ done := make(map[string]struct{})
+ for _, item := range n.Items {
+ if item.Val == nil {
+ continue
+ }
+
+ // github.com/hashicorp/terraform/issue/5740
+ if len(item.Keys) == 0 {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: map must have string keys", name),
+ }
+ }
+
+ // Get the key we're dealing with, which is the first item
+ keyStr := item.Keys[0].Token.Value().(string)
+
+ // If we've already processed this key, then ignore it
+ if _, ok := done[keyStr]; ok {
+ continue
+ }
+
+ // Determine the value. If we have more than one key, then we
+ // get the objectlist of only these keys.
+ itemVal := item.Val
+ if len(item.Keys) > 1 {
+ itemVal = n.Filter(keyStr)
+ done[keyStr] = struct{}{}
+ }
+
+ // Make the field name
+ fieldName := fmt.Sprintf("%s.%s", name, keyStr)
+
+ // Get the key/value as reflection values
+ key := reflect.ValueOf(keyStr)
+ val := reflect.Indirect(reflect.New(resultElemType))
+
+ // If we have a pre-existing value in the map, use that
+ oldVal := resultMap.MapIndex(key)
+ if oldVal.IsValid() {
+ val.Set(oldVal)
+ }
+
+ // Decode!
+ if err := d.decode(fieldName, itemVal, val); err != nil {
+ return err
+ }
+
+ // Set the value on the map
+ resultMap.SetMapIndex(key, val)
+ }
+
+ // Set the final map if we can
+ set.Set(resultMap)
+ return nil
+}
+
+func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
+ // Create an element of the concrete (non pointer) type and decode
+ // into that. Then set the value of the pointer to this type.
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ val := reflect.New(resultElemType)
+ if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
+ return err
+ }
+
+ result.Set(val)
+ return nil
+}
+
+func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
+ // If we have an interface, then we can address the interface,
+ // but not the slice itself, so get the element but set the interface
+ set := result
+ if result.Kind() == reflect.Interface {
+ result = result.Elem()
+ }
+	// Create the slice if it is nil
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ if result.IsNil() {
+ resultSliceType := reflect.SliceOf(resultElemType)
+ result = reflect.MakeSlice(
+ resultSliceType, 0, 0)
+ }
+
+ // Figure out the items we'll be copying into the slice
+ var items []ast.Node
+ switch n := node.(type) {
+ case *ast.ObjectList:
+ items = make([]ast.Node, len(n.Items))
+ for i, item := range n.Items {
+ items[i] = item
+ }
+ case *ast.ObjectType:
+ items = []ast.Node{n}
+ case *ast.ListType:
+ items = n.List
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("unknown slice type: %T", node),
+ }
+ }
+
+ for i, item := range items {
+ fieldName := fmt.Sprintf("%s[%d]", name, i)
+
+ // Decode
+ val := reflect.Indirect(reflect.New(resultElemType))
+
+ // if item is an object that was decoded from ambiguous JSON and
+ // flattened, make sure it's expanded if it needs to decode into a
+ // defined structure.
+ item := expandObject(item, val)
+
+ if err := d.decode(fieldName, item, val); err != nil {
+ return err
+ }
+
+ // Append it onto the slice
+ result = reflect.Append(result, val)
+ }
+
+ set.Set(result)
+ return nil
+}
+
+// expandObject detects if an ambiguous JSON object was flattened to a List which
+// should be decoded into a struct, and expands the AST so it decodes properly.
+func expandObject(node ast.Node, result reflect.Value) ast.Node {
+ item, ok := node.(*ast.ObjectItem)
+ if !ok {
+ return node
+ }
+
+ elemType := result.Type()
+
+ // our target type must be a struct
+ switch elemType.Kind() {
+ case reflect.Ptr:
+ switch elemType.Elem().Kind() {
+ case reflect.Struct:
+ //OK
+ default:
+ return node
+ }
+ case reflect.Struct:
+ //OK
+ default:
+ return node
+ }
+
+ // A list value will have a key and field name. If it had more fields,
+ // it wouldn't have been flattened.
+ if len(item.Keys) != 2 {
+ return node
+ }
+
+ keyToken := item.Keys[0].Token
+ item.Keys = item.Keys[1:]
+
+ // we need to un-flatten the ast enough to decode
+ newNode := &ast.ObjectItem{
+ Keys: []*ast.ObjectKey{
+ &ast.ObjectKey{
+ Token: keyToken,
+ },
+ },
+ Val: &ast.ObjectType{
+ List: &ast.ObjectList{
+ Items: []*ast.ObjectItem{item},
+ },
+ },
+ }
+
+ return newNode
+}
+
+func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.NUMBER:
+ result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
+ return nil
+ case token.STRING, token.HEREDOC:
+ result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type for string %T", name, node),
+ }
+}
+
+func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
+ var item *ast.ObjectItem
+ if it, ok := node.(*ast.ObjectItem); ok {
+ item = it
+ node = it.Val
+ }
+
+ if ot, ok := node.(*ast.ObjectType); ok {
+ node = ot.List
+ }
+
+ // Handle the special case where the object itself is a literal. Previously
+ // the yacc parser would always ensure top-level elements were arrays. The new
+ // parser does not make the same guarantees, thus we need to convert any
+ // top-level literal elements into a list.
+ if _, ok := node.(*ast.LiteralType); ok && item != nil {
+ node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+ }
+
+ list, ok := node.(*ast.ObjectList)
+ if !ok {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
+ }
+ }
+
+ // This slice will keep track of all the structs we'll be decoding.
+ // There can be more than one struct if there are embedded structs
+ // that are squashed.
+ structs := make([]reflect.Value, 1, 5)
+ structs[0] = result
+
+ // Compile the list of all the fields that we're going to be decoding
+ // from all the structs.
+ fields := make(map[*reflect.StructField]reflect.Value)
+ for len(structs) > 0 {
+ structVal := structs[0]
+ structs = structs[1:]
+
+ structType := structVal.Type()
+ for i := 0; i < structType.NumField(); i++ {
+ fieldType := structType.Field(i)
+ tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
+
+ // Ignore fields with tag name "-"
+ if tagParts[0] == "-" {
+ continue
+ }
+
+ if fieldType.Anonymous {
+ fieldKind := fieldType.Type.Kind()
+ if fieldKind != reflect.Struct {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unsupported type to struct: %s",
+ fieldType.Name, fieldKind),
+ }
+ }
+
+ // We have an embedded field. We "squash" the fields down
+ // if specified in the tag.
+ squash := false
+ for _, tag := range tagParts[1:] {
+ if tag == "squash" {
+ squash = true
+ break
+ }
+ }
+
+ if squash {
+ structs = append(
+ structs, result.FieldByName(fieldType.Name))
+ continue
+ }
+ }
+
+ // Normal struct field, store it away
+ fields[&fieldType] = structVal.Field(i)
+ }
+ }
+
+ usedKeys := make(map[string]struct{})
+ decodedFields := make([]string, 0, len(fields))
+ decodedFieldsVal := make([]reflect.Value, 0)
+ unusedKeysVal := make([]reflect.Value, 0)
+ for fieldType, field := range fields {
+ if !field.IsValid() {
+ // This should never happen
+ panic("field is not valid")
+ }
+
+ // If we can't set the field, then it is unexported or something,
+ // and we just continue onwards.
+ if !field.CanSet() {
+ continue
+ }
+
+ fieldName := fieldType.Name
+
+ tagValue := fieldType.Tag.Get(tagName)
+ tagParts := strings.SplitN(tagValue, ",", 2)
+ if len(tagParts) >= 2 {
+ switch tagParts[1] {
+ case "decodedFields":
+ decodedFieldsVal = append(decodedFieldsVal, field)
+ continue
+ case "key":
+ if item == nil {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: %s asked for 'key', impossible",
+ name, fieldName),
+ }
+ }
+
+ field.SetString(item.Keys[0].Token.Value().(string))
+ continue
+ case "unusedKeys":
+ unusedKeysVal = append(unusedKeysVal, field)
+ continue
+ }
+ }
+
+ if tagParts[0] != "" {
+ fieldName = tagParts[0]
+ }
+
+ // Determine the element we'll use to decode. If it is a single
+ // match (only object with the field), then we decode it exactly.
+ // If it is a prefix match, then we decode the matches.
+ filter := list.Filter(fieldName)
+
+ prefixMatches := filter.Children()
+ matches := filter.Elem()
+ if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
+ continue
+ }
+
+ // Track the used key
+ usedKeys[fieldName] = struct{}{}
+
+ // Create the field name and decode. We range over the elements
+ // because we actually want the value.
+ fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+ if len(prefixMatches.Items) > 0 {
+ if err := d.decode(fieldName, prefixMatches, field); err != nil {
+ return err
+ }
+ }
+ for _, match := range matches.Items {
+ var decodeNode ast.Node = match.Val
+ if ot, ok := decodeNode.(*ast.ObjectType); ok {
+ decodeNode = &ast.ObjectList{Items: ot.List.Items}
+ }
+
+ if err := d.decode(fieldName, decodeNode, field); err != nil {
+ return err
+ }
+ }
+
+ decodedFields = append(decodedFields, fieldType.Name)
+ }
+
+ if len(decodedFieldsVal) > 0 {
+ // Sort it so that it is deterministic
+ sort.Strings(decodedFields)
+
+ for _, v := range decodedFieldsVal {
+ v.Set(reflect.ValueOf(decodedFields))
+ }
+ }
+
+ return nil
+}
+
+// findNodeType returns the type of ast.Node
+func findNodeType() reflect.Type {
+ var nodeContainer struct {
+ Node ast.Node
+ }
+ value := reflect.ValueOf(nodeContainer).FieldByName("Node")
+ return value.Type()
+}
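
decodeStruct above honors an `hcl:",key"` tag, which captures a block's label into a field. A sketch of the repeated-block pattern that exercises it (the Service and Config types are illustrative):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

// Name receives the block label via the ",key" tag.
type Service struct {
	Name string `hcl:",key"`
	Port int    `hcl:"port"`
}

type Config struct {
	Services []Service `hcl:"service"`
}

func main() {
	input := `
service "web" {
  port = 80
}

service "api" {
  port = 8080
}
`
	var cfg Config
	if err := hcl.Decode(&cfg, input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg.Services) // [{Name:web Port:80} {Name:api Port:8080}]
}
```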
diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go
new file mode 100644
index 00000000..575a20b5
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl.go
@@ -0,0 +1,11 @@
+// Package hcl decodes HCL into usable Go structures.
+//
+// hcl input can come in either pure HCL format or JSON format.
+// It can be parsed into an AST, and then decoded into a structure,
+// or it can be decoded directly from a string into a structure.
+//
+// If you choose to parse HCL into a raw AST, the benefit is that you
+// can write custom visitor implementations to implement custom
+// semantic checks. By default, HCL does not perform any semantic
+// checks.
+package hcl
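
A minimal sketch of the two-step flow this package doc describes, using Parse (defined elsewhere in the package) to obtain the AST and then DecodeObject to populate a structure:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	// Parsing first leaves room for custom semantic checks on the AST.
	root, err := hcl.Parse(`name = "demo"`)
	if err != nil {
		panic(err)
	}

	var out struct {
		Name string `hcl:"name"`
	}
	if err := hcl.DecodeObject(&out, root); err != nil {
		panic(err)
	}
	fmt.Println(out.Name) // demo
}
```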
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
new file mode 100644
index 00000000..6e5ef654
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
@@ -0,0 +1,219 @@
+// Package ast declares the types used to represent syntax trees for HCL
+// (HashiCorp Configuration Language)
+package ast
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Node is an element in the abstract syntax tree.
+type Node interface {
+ node()
+ Pos() token.Pos
+}
+
+func (File) node() {}
+func (ObjectList) node() {}
+func (ObjectKey) node() {}
+func (ObjectItem) node() {}
+func (Comment) node() {}
+func (CommentGroup) node() {}
+func (ObjectType) node() {}
+func (LiteralType) node() {}
+func (ListType) node() {}
+
+// File represents a single HCL file
+type File struct {
+ Node Node // usually a *ObjectList
+ Comments []*CommentGroup // list of all comments in the source
+}
+
+func (f *File) Pos() token.Pos {
+ return f.Node.Pos()
+}
+
+// ObjectList represents a list of ObjectItems. An HCL file itself is an
+// ObjectList.
+type ObjectList struct {
+ Items []*ObjectItem
+}
+
+func (o *ObjectList) Add(item *ObjectItem) {
+ o.Items = append(o.Items, item)
+}
+
+// Filter filters out the objects with the given key list as a prefix.
+//
+// The returned list of objects contains ObjectItems where the keys have
+// this prefix already stripped off. This might result in objects with
+// zero-length key lists if they have no children.
+//
+// If no matches are found, an empty ObjectList (non-nil) is returned.
+func (o *ObjectList) Filter(keys ...string) *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ // If there aren't enough keys, then ignore this
+ if len(item.Keys) < len(keys) {
+ continue
+ }
+
+ match := true
+ for i, key := range item.Keys[:len(keys)] {
+ key := key.Token.Value().(string)
+ if key != keys[i] && !strings.EqualFold(key, keys[i]) {
+ match = false
+ break
+ }
+ }
+ if !match {
+ continue
+ }
+
+ // Strip off the prefix from the children
+ newItem := *item
+ newItem.Keys = newItem.Keys[len(keys):]
+ result.Add(&newItem)
+ }
+
+ return &result
+}
+
+// Children returns further nested objects (key length > 0) within this
+// ObjectList. This should be used with Filter to get at child items.
+func (o *ObjectList) Children() *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ if len(item.Keys) > 0 {
+ result.Add(item)
+ }
+ }
+
+ return &result
+}
+
+// Elem returns items in the list that are direct element assignments
+// (key length == 0). This should be used with Filter to get at elements.
+func (o *ObjectList) Elem() *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ if len(item.Keys) == 0 {
+ result.Add(item)
+ }
+ }
+
+ return &result
+}
+
+func (o *ObjectList) Pos() token.Pos {
+	// returns the position of the first item
+ return o.Items[0].Pos()
+}
+
+// ObjectItem represents an HCL Object Item. An item is represented with a key
+// (or keys). It can be an assignment or an object (both normal and nested).
+type ObjectItem struct {
+	// keys is only one element long if it's of type assignment. If it's a
+	// nested object it can be larger than one. In that case "assign" is
+	// invalid, as there are no assignments for a nested object.
+ Keys []*ObjectKey
+
+ // assign contains the position of "=", if any
+ Assign token.Pos
+
+	// val is the item itself. It can be an object, list, number, bool or a
+	// string. If the key length is larger than one, val can only be of type
+	// Object.
+ Val Node
+
+ LeadComment *CommentGroup // associated lead comment
+ LineComment *CommentGroup // associated line comment
+}
+
+func (o *ObjectItem) Pos() token.Pos {
+ // I'm not entirely sure what causes this, but removing this causes
+ // a test failure. We should investigate at some point.
+ if len(o.Keys) == 0 {
+ return token.Pos{}
+ }
+
+ return o.Keys[0].Pos()
+}
+
+// ObjectKey is either an identifier or a string.
+type ObjectKey struct {
+ Token token.Token
+}
+
+func (o *ObjectKey) Pos() token.Pos {
+ return o.Token.Pos
+}
+
+// LiteralType represents a literal of basic type. Valid types are:
+// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
+type LiteralType struct {
+ Token token.Token
+
+ // comment types, only used when in a list
+ LeadComment *CommentGroup
+ LineComment *CommentGroup
+}
+
+func (l *LiteralType) Pos() token.Pos {
+ return l.Token.Pos
+}
+
+// ListType represents an HCL list type
+type ListType struct {
+ Lbrack token.Pos // position of "["
+ Rbrack token.Pos // position of "]"
+ List []Node // the elements in lexical order
+}
+
+func (l *ListType) Pos() token.Pos {
+ return l.Lbrack
+}
+
+func (l *ListType) Add(node Node) {
+ l.List = append(l.List, node)
+}
+
+// ObjectType represents an HCL object type
+type ObjectType struct {
+ Lbrace token.Pos // position of "{"
+ Rbrace token.Pos // position of "}"
+ List *ObjectList // the nodes in lexical order
+}
+
+func (o *ObjectType) Pos() token.Pos {
+ return o.Lbrace
+}
+
+// Comment node represents a single //, #-style or /*-style comment
+type Comment struct {
+ Start token.Pos // position of / or #
+ Text string
+}
+
+func (c *Comment) Pos() token.Pos {
+ return c.Start
+}
+
+// CommentGroup node represents a sequence of comments with no other tokens and
+// no empty lines between.
+type CommentGroup struct {
+ List []*Comment // len(List) > 0
+}
+
+func (c *CommentGroup) Pos() token.Pos {
+ return c.List[0].Pos()
+}
+
+//-------------------------------------------------------------------
+// GoStringer
+//-------------------------------------------------------------------
+
+func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
+func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
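
Usage sketch (illustrative, not part of the vendored file): Filter strips the
matched key prefix, so the remaining items are keyed by whatever followed it.
The HCL snippet below is an assumption for the example.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	src := `
variable "region" {
  default = "us-east-1"
}
variable "ami" {
  default = "ami-123456"
}
`
	f, err := parser.Parse([]byte(src))
	if err != nil {
		panic(err)
	}

	// The root node of a parsed file is an *ast.ObjectList.
	list := f.Node.(*ast.ObjectList)

	// Keep only the "variable" items; Filter removes the matched key, so
	// each remaining item is keyed by its variable name.
	for _, item := range list.Filter("variable").Items {
		fmt.Println("variable:", item.Keys[0].Token.Value())
	}
}
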
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
new file mode 100644
index 00000000..ba07ad42
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
@@ -0,0 +1,52 @@
+package ast
+
+import "fmt"
+
+// WalkFunc describes a function to be called for each node during a Walk. The
+// returned node can be used to rewrite the AST. Walking stops if the returned
+// bool is false.
+type WalkFunc func(Node) (Node, bool)
+
+// Walk traverses an AST in depth-first order: it starts by calling fn(node);
+// node must not be nil. If fn returns true, Walk invokes fn recursively for
+// each of the non-nil children of node, followed by a call of fn(nil). The
+// node returned by fn can be used to rewrite the node that was passed in.
+func Walk(node Node, fn WalkFunc) Node {
+ rewritten, ok := fn(node)
+ if !ok {
+ return rewritten
+ }
+
+ switch n := node.(type) {
+ case *File:
+ n.Node = Walk(n.Node, fn)
+ case *ObjectList:
+ for i, item := range n.Items {
+ n.Items[i] = Walk(item, fn).(*ObjectItem)
+ }
+ case *ObjectKey:
+ // nothing to do
+ case *ObjectItem:
+ for i, k := range n.Keys {
+ n.Keys[i] = Walk(k, fn).(*ObjectKey)
+ }
+
+ if n.Val != nil {
+ n.Val = Walk(n.Val, fn)
+ }
+ case *LiteralType:
+ // nothing to do
+ case *ListType:
+ for i, l := range n.List {
+ n.List[i] = Walk(l, fn)
+ }
+ case *ObjectType:
+ n.List = Walk(n.List, fn).(*ObjectList)
+ default:
+ // should we panic here?
+ fmt.Printf("unknown type: %T\n", n)
+ }
+
+ fn(nil)
+ return rewritten
+}
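
Usage sketch (illustrative): returning the node unchanged together with true
visits the entire tree without rewriting it, here to count literal values.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	f, err := parser.Parse([]byte("port = 8080\nname = \"web\""))
	if err != nil {
		panic(err)
	}

	literals := 0
	ast.Walk(f.Node, func(n ast.Node) (ast.Node, bool) {
		// n is nil for the trailing fn(nil) calls made after a node's
		// children, so the type assertion below simply fails for those.
		if _, ok := n.(*ast.LiteralType); ok {
			literals++
		}
		return n, true
	})
	fmt.Println("literal nodes:", literals) // 2
}
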
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
new file mode 100644
index 00000000..5c99381d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
@@ -0,0 +1,17 @@
+package parser
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// PosError is a parse error that contains a position.
+type PosError struct {
+ Pos token.Pos
+ Err error
+}
+
+func (e *PosError) Error() string {
+ return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
+}
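
A short sketch of consuming this error type from calling code; the
deliberately broken input is an assumption for the example.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// The unterminated list should fail with a position-carrying error.
	_, err := parser.Parse([]byte(`ports = [80,`))
	if perr, ok := err.(*parser.PosError); ok {
		fmt.Printf("parse failed at %s: %v\n", perr.Pos, perr.Err)
		return
	}
	fmt.Println("err:", err)
}
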
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
new file mode 100644
index 00000000..6e54bed9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -0,0 +1,514 @@
+// Package parser implements a parser for HCL (HashiCorp Configuration
+// Language)
+package parser
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/scanner"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+type Parser struct {
+ sc *scanner.Scanner
+
+ // Last read token
+ tok token.Token
+ commaPrev token.Token
+
+ comments []*ast.CommentGroup
+ leadComment *ast.CommentGroup // last lead comment
+ lineComment *ast.CommentGroup // last line comment
+
+ enableTrace bool
+ indent int
+ n int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+ return &Parser{
+ sc: scanner.New(src),
+ }
+}
+
+// Parse parses the given source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+ p := newParser(src)
+ return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the given source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+ f := &ast.File{}
+ var err, scerr error
+ p.sc.Error = func(pos token.Pos, msg string) {
+ scerr = &PosError{Pos: pos, Err: errors.New(msg)}
+ }
+
+ f.Node, err = p.objectList(false)
+ if scerr != nil {
+ return nil, scerr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ f.Comments = p.comments
+ return f, nil
+}
+
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter "obj" tells us whether we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
+ defer un(trace(p, "ParseObjectList"))
+ node := &ast.ObjectList{}
+
+ for {
+ if obj {
+ tok := p.scan()
+ p.unscan()
+ if tok.Type == token.RBRACE {
+ break
+ }
+ }
+
+ n, err := p.objectItem()
+ if err == errEofToken {
+ break // we are finished
+ }
+
+ // we don't return a nil node, because the caller might want to use the
+ // already collected items.
+ if err != nil {
+ return node, err
+ }
+
+ node.Add(n)
+
+ // object lists can be optionally comma-delimited e.g. when a list of maps
+ // is being expressed, so a comma is allowed here - it's simply consumed
+ tok := p.scan()
+ if tok.Type != token.COMMA {
+ p.unscan()
+ }
+ }
+ return node, nil
+}
+
+func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
+ endline = p.tok.Pos.Line
+
+ // count the endline if it's a multiline comment, i.e. starting with /*
+ if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
+ // don't use range here - no need to decode Unicode code points
+ for i := 0; i < len(p.tok.Text); i++ {
+ if p.tok.Text[i] == '\n' {
+ endline++
+ }
+ }
+ }
+
+ comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
+ p.tok = p.sc.Scan()
+ return
+}
+
+func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+ var list []*ast.Comment
+ endline = p.tok.Pos.Line
+
+ for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
+ var comment *ast.Comment
+ comment, endline = p.consumeComment()
+ list = append(list, comment)
+ }
+
+ // add comment group to the comments list
+ comments = &ast.CommentGroup{List: list}
+ p.comments = append(p.comments, comments)
+
+ return
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+ defer un(trace(p, "ParseObjectItem"))
+
+ keys, err := p.objectKey()
+ if len(keys) > 0 && err == errEofToken {
+ // We ignore eof token here since it is an error if we didn't
+ // receive a value (but we did receive a key) for the item.
+ err = nil
+ }
+ if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
+ // This is a strange boolean statement, but what it means is:
+ // We have keys with no value, and we're likely in an object
+ // (since RBrace ends an object). For this, we set err to nil so
+ // we continue and get the error below of having the wrong value
+ // type.
+ err = nil
+
+ // Reset the token type so we don't think it completed fine. See
+ // objectType which uses p.tok.Type to check if we're done with
+ // the object.
+ p.tok.Type = token.EOF
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ o := &ast.ObjectItem{
+ Keys: keys,
+ }
+
+ if p.leadComment != nil {
+ o.LeadComment = p.leadComment
+ p.leadComment = nil
+ }
+
+ switch p.tok.Type {
+ case token.ASSIGN:
+ o.Assign = p.tok.Pos
+ o.Val, err = p.object()
+ if err != nil {
+ return nil, err
+ }
+ case token.LBRACE:
+ o.Val, err = p.objectType()
+ if err != nil {
+ return nil, err
+ }
+ default:
+ keyStr := make([]string, 0, len(keys))
+ for _, k := range keys {
+ keyStr = append(keyStr, k.Token.Text)
+ }
+
+ return nil, fmt.Errorf(
+ "key '%s' expected start of object ('{') or assignment ('=')",
+ strings.Join(keyStr, " "))
+ }
+
+ // do a look-ahead for line comment
+ p.scan()
+ if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
+ o.LineComment = p.lineComment
+ p.lineComment = nil
+ }
+ p.unscan()
+ return o, nil
+}
+
+// objectKey parses an object key and returns an ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+ keyCount := 0
+ keys := make([]*ast.ObjectKey, 0)
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.EOF:
+ // It is very important to also return the keys here as well as
+ // the error. This is because we need to be able to tell if we
+ // did parse keys prior to finding the EOF, or if we just found
+ // a bare EOF.
+ return keys, errEofToken
+ case token.ASSIGN:
+ // assignment or object only, but not nested objects. this is not
+ // allowed: `foo bar = {}`
+ if keyCount > 1 {
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
+ }
+ }
+
+ if keyCount == 0 {
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: errors.New("no object keys found!"),
+ }
+ }
+
+ return keys, nil
+ case token.LBRACE:
+ var err error
+
+ // If we have no keys, then it is a syntax error. i.e. {{}} is not
+ // allowed.
+ if len(keys) == 0 {
+ err = &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
+ }
+ }
+
+ // object
+ return keys, err
+ case token.IDENT, token.STRING:
+ keyCount++
+ keys = append(keys, &ast.ObjectKey{Token: p.tok})
+ case token.ILLEGAL:
+ return keys, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("illegal character"),
+ }
+ default:
+ return keys, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
+ }
+ }
+ }
+}
+
+// object parses any type of value, such as a number, bool, string, object or
+// list.
+func (p *Parser) object() (ast.Node, error) {
+ defer un(trace(p, "ParseType"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
+ return p.literalType()
+ case token.LBRACE:
+ return p.objectType()
+ case token.LBRACK:
+ return p.listType()
+ case token.COMMENT:
+ // implement comment
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("Unknown token: %+v", tok),
+ }
+}
+
+// objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseObjectType"))
+
+ // we assume that the currently scanned token is a LBRACE
+ o := &ast.ObjectType{
+ Lbrace: p.tok.Pos,
+ }
+
+ l, err := p.objectList(true)
+
+ // if we hit RBRACE, we are good to go (it means we parsed all items); if
+ // it's not an RBRACE, it's a syntax error and we just return it.
+ if err != nil && p.tok.Type != token.RBRACE {
+ return nil, err
+ }
+
+ // No error, scan and expect the ending to be a brace
+ if tok := p.scan(); tok.Type != token.RBRACE {
+ return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type)
+ }
+
+ o.List = l
+ o.Rbrace = p.tok.Pos // advanced via parseObjectList
+ return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+ defer un(trace(p, "ParseListType"))
+
+ // we assume that the currently scanned token is a LBRACK
+ l := &ast.ListType{
+ Lbrack: p.tok.Pos,
+ }
+
+ needComma := false
+ for {
+ tok := p.scan()
+ if needComma {
+ switch tok.Type {
+ case token.COMMA, token.RBRACK:
+ default:
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error parsing list, expected comma or list end, got: %s",
+ tok.Type),
+ }
+ }
+ }
+ switch tok.Type {
+ case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
+ node, err := p.literalType()
+ if err != nil {
+ return nil, err
+ }
+
+ // If there is a lead comment, apply it
+ if p.leadComment != nil {
+ node.LeadComment = p.leadComment
+ p.leadComment = nil
+ }
+
+ l.Add(node)
+ needComma = true
+ case token.COMMA:
+ // get next list item or we are at the end
+ // do a look-ahead for line comment
+ p.scan()
+ if p.lineComment != nil && len(l.List) > 0 {
+ lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
+ if ok {
+ lit.LineComment = p.lineComment
+ l.List[len(l.List)-1] = lit
+ p.lineComment = nil
+ }
+ }
+ p.unscan()
+
+ needComma = false
+ continue
+ case token.LBRACE:
+ // Looks like a nested object, so parse it out
+ node, err := p.objectType()
+ if err != nil {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error while trying to parse object within list: %s", err),
+ }
+ }
+ l.Add(node)
+ needComma = true
+ case token.LBRACK:
+ node, err := p.listType()
+ if err != nil {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error while trying to parse list within list: %s", err),
+ }
+ }
+ l.Add(node)
+ case token.RBRACK:
+ // finished
+ l.Rbrack = p.tok.Pos
+ return l, nil
+ default:
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
+ }
+ }
+ }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+ defer un(trace(p, "ParseLiteral"))
+
+ return &ast.LiteralType{
+ Token: p.tok,
+ }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead. In the process, it collects any
+// comment groups encountered, and remembers the last lead and line comments.
+func (p *Parser) scan() token.Token {
+ // If we have a token on the buffer, then return it.
+ if p.n != 0 {
+ p.n = 0
+ return p.tok
+ }
+
+ // Otherwise read the next token from the scanner and save it to the buffer
+ // in case we unscan later.
+ prev := p.tok
+ p.tok = p.sc.Scan()
+
+ if p.tok.Type == token.COMMENT {
+ var comment *ast.CommentGroup
+ var endline int
+
+ // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
+ // p.tok.Pos.Line, prev.Pos.Line, endline)
+ if p.tok.Pos.Line == prev.Pos.Line {
+ // The comment is on same line as the previous token; it
+ // cannot be a lead comment but may be a line comment.
+ comment, endline = p.consumeCommentGroup(0)
+ if p.tok.Pos.Line != endline {
+ // The next token is on a different line, thus
+ // the last comment group is a line comment.
+ p.lineComment = comment
+ }
+ }
+
+ // consume successor comments, if any
+ endline = -1
+ for p.tok.Type == token.COMMENT {
+ comment, endline = p.consumeCommentGroup(1)
+ }
+
+ if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
+ switch p.tok.Type {
+ case token.RBRACE, token.RBRACK:
+ // Do not count for these cases
+ default:
+ // The next token is following on the line immediately after the
+ // comment group, thus the last comment group is a lead comment.
+ p.leadComment = comment
+ }
+ }
+
+ }
+
+ return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+ p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+ p.indent--
+ p.printTrace(")")
+}
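
A minimal end-to-end sketch of this parser (the HCL input is illustrative);
note how the comment groups collected by scan() surface on the returned File
and on individual items.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	src := `
// lead comment for the service block
service "web" {
  port = 8080 // line comment
}
`
	f, err := parser.Parse([]byte(src))
	if err != nil {
		panic(err)
	}

	items := f.Node.(*ast.ObjectList).Items
	fmt.Printf("items: %d, comment groups: %d\n", len(items), len(f.Comments))
	if items[0].LeadComment != nil {
		fmt.Println("lead:", items[0].LeadComment.List[0].Text)
	}
}
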
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
new file mode 100644
index 00000000..69662367
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
@@ -0,0 +1,651 @@
+// Package scanner implements a scanner for HCL (HashiCorp Configuration
+// Language) source text.
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "regexp"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+ buf *bytes.Buffer // Source buffer for advancing and scanning
+ src []byte // Source buffer for immutable access
+
+ // Source Position
+ srcPos token.Pos // current position
+ prevPos token.Pos // previous position, used for peek() method
+
+ lastCharLen int // length of last character in bytes
+ lastLineLen int // length of last line in characters (for correct column reporting)
+
+ tokStart int // token text start position
+ tokEnd int // token text end position
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(pos token.Pos, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+ // tokPos is the start position of most recently scanned token; set by
+ // Scan. The Filename field is always left untouched by the Scanner. If
+ // an error is reported (via Error) and Position is invalid, the scanner is
+ // not inside a token.
+ tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+ // even though we accept a src, we read from an io.Reader-compatible type
+ // (*bytes.Buffer). So in the future we might easily change it to a
+ // streaming read.
+ b := bytes.NewBuffer(src)
+ s := &Scanner{
+ buf: b,
+ src: src,
+ }
+
+ // srcPosition always starts with 1
+ s.srcPos.Line = 1
+ return s
+}
+
+// next reads the next rune from the buffered reader. It returns rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+ ch, size, err := s.buf.ReadRune()
+ if err != nil {
+ // advance for error reporting
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ return eof
+ }
+
+ if ch == utf8.RuneError && size == 1 {
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ s.err("illegal UTF-8 encoding")
+ return ch
+ }
+
+ // remember last position
+ s.prevPos = s.srcPos
+
+ s.srcPos.Column++
+ s.lastCharLen = size
+ s.srcPos.Offset += size
+
+ if ch == '\n' {
+ s.srcPos.Line++
+ s.lastLineLen = s.srcPos.Column
+ s.srcPos.Column = 0
+ }
+
+ // If we see a null character with data left, then that is an error
+ if ch == '\x00' && s.buf.Len() > 0 {
+ s.err("unexpected null character (0x00)")
+ return eof
+ }
+
+ // debug
+ // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+ return ch
+}
+
+// unread unreads the previously read rune and updates the source position
+func (s *Scanner) unread() {
+ if err := s.buf.UnreadRune(); err != nil {
+ panic(err) // this is user fault, we should catch it
+ }
+ s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+ peek, _, err := s.buf.ReadRune()
+ if err != nil {
+ return eof
+ }
+
+ s.buf.UnreadRune()
+ return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+ ch := s.next()
+
+ // skip white space
+ for isWhitespace(ch) {
+ ch = s.next()
+ }
+
+ var tok token.Type
+
+ // token text markings
+ s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+ // token position: the initial next() moves the offset by one (the size of
+ // the rune, actually), though we are interested in the starting point
+ s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+ if s.srcPos.Column > 0 {
+ // common case: last character was not a '\n'
+ s.tokPos.Line = s.srcPos.Line
+ s.tokPos.Column = s.srcPos.Column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.tokPos.Line = s.srcPos.Line - 1
+ s.tokPos.Column = s.lastLineLen
+ }
+
+ switch {
+ case isLetter(ch):
+ tok = token.IDENT
+ lit := s.scanIdentifier()
+ if lit == "true" || lit == "false" {
+ tok = token.BOOL
+ }
+ case isDecimal(ch):
+ tok = s.scanNumber(ch)
+ default:
+ switch ch {
+ case eof:
+ tok = token.EOF
+ case '"':
+ tok = token.STRING
+ s.scanString()
+ case '#', '/':
+ tok = token.COMMENT
+ s.scanComment(ch)
+ case '.':
+ tok = token.PERIOD
+ ch = s.peek()
+ if isDecimal(ch) {
+ tok = token.FLOAT
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '<':
+ tok = token.HEREDOC
+ s.scanHeredoc()
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ tok = token.RBRACE
+ case ',':
+ tok = token.COMMA
+ case '=':
+ tok = token.ASSIGN
+ case '+':
+ tok = token.ADD
+ case '-':
+ if isDecimal(s.peek()) {
+ ch := s.next()
+ tok = s.scanNumber(ch)
+ } else {
+ tok = token.SUB
+ }
+ default:
+ s.err("illegal char")
+ }
+ }
+
+ // finish token ending
+ s.tokEnd = s.srcPos.Offset
+
+ // create token literal
+ var tokenText string
+ if s.tokStart >= 0 {
+ tokenText = string(s.src[s.tokStart:s.tokEnd])
+ }
+ s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+ return token.Token{
+ Type: tok,
+ Pos: s.tokPos,
+ Text: tokenText,
+ }
+}
+
+func (s *Scanner) scanComment(ch rune) {
+ // single line comments
+ if ch == '#' || (ch == '/' && s.peek() != '*') {
+ if ch == '/' && s.peek() != '/' {
+ s.err("expected '/' for comment")
+ return
+ }
+
+ ch = s.next()
+ for ch != '\n' && ch >= 0 && ch != eof {
+ ch = s.next()
+ }
+ if ch != eof && ch >= 0 {
+ s.unread()
+ }
+ return
+ }
+
+ // be sure we get the character after /*. This allows us to find comments
+ // that are not terminated
+ if ch == '/' {
+ s.next()
+ ch = s.next() // read character after "/*"
+ }
+
+ // look for /* - style comments
+ for {
+ if ch < 0 || ch == eof {
+ s.err("comment not terminated")
+ break
+ }
+
+ ch0 := ch
+ ch = s.next()
+ if ch0 == '*' && ch == '/' {
+ break
+ }
+ }
+}
+
+// scanNumber scans an HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+ if ch == '0' {
+ // check for hexadecimal, octal or float
+ ch = s.next()
+ if ch == 'x' || ch == 'X' {
+ // hexadecimal
+ ch = s.next()
+ found := false
+ for isHexadecimal(ch) {
+ ch = s.next()
+ found = true
+ }
+
+ if !found {
+ s.err("illegal hexadecimal number")
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+
+ return token.NUMBER
+ }
+
+ // now it's either something like: 0421(octal) or 0.1231(float)
+ illegalOctal := false
+ for isDecimal(ch) {
+ ch = s.next()
+ if ch == '8' || ch == '9' {
+ // this is just a possibility. For example 0159 is illegal, but
+ // 0159.23 is valid. So we mark a possible illegal octal. If
+ // the next character is not a period, we'll print the error.
+ illegalOctal = true
+ }
+ }
+
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if illegalOctal {
+ s.err("illegal octal number")
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+ return token.NUMBER
+ }
+
+ s.scanMantissa(ch)
+ ch = s.next() // seek forward
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+ return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+ scanned := false
+ for isDecimal(ch) {
+ ch = s.next()
+ scanned = true
+ }
+
+ if scanned && ch != eof {
+ s.unread()
+ }
+ return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.peek() // we peek just to see if we can move forward
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanHeredoc scans a heredoc string
+func (s *Scanner) scanHeredoc() {
+ // Scan the second '<' in example: '<<EOF'
+ if s.next() != '<' {
+ s.err("heredoc expected second '<', didn't see it")
+ return
+ }
+
+ // Get the original offset so we can read just the heredoc ident
+ offs := s.srcPos.Offset
+
+ // Scan the identifier
+ ch := s.next()
+
+ // Indented heredoc syntax
+ if ch == '-' {
+ ch = s.next()
+ }
+
+ for isLetter(ch) || isDigit(ch) {
+ ch = s.next()
+ }
+
+ // If we reached an EOF then that is not good
+ if ch == eof {
+ s.err("heredoc not terminated")
+ return
+ }
+
+ // Ignore the '\r' in Windows line endings
+ if ch == '\r' {
+ if s.peek() == '\n' {
+ ch = s.next()
+ }
+ }
+
+ // If we didn't reach a newline then that is also not good
+ if ch != '\n' {
+ s.err("invalid characters in heredoc anchor")
+ return
+ }
+
+ // Read the identifier
+ identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
+ if len(identBytes) == 0 {
+ s.err("zero-length heredoc anchor")
+ return
+ }
+
+ var identRegexp *regexp.Regexp
+ if identBytes[0] == '-' {
+ identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
+ } else {
+ identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
+ }
+
+ // Read the actual string value
+ lineStart := s.srcPos.Offset
+ for {
+ ch := s.next()
+
+ // Special newline handling.
+ if ch == '\n' {
+ // Math is fast, so we first compare the byte counts to see if we have a chance
+ // of seeing the same identifier - if the length is less than the number of bytes
+ // in the identifier, this cannot be a valid terminator.
+ lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
+ if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
+ break
+ }
+
+ // Not an anchor match, record the start of a new line
+ lineStart = s.srcPos.Offset
+ }
+
+ if ch == eof {
+ s.err("heredoc not terminated")
+ return
+ }
+ }
+
+ return
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+ braces := 0
+ for {
+ // '"' opening already consumed
+ // read character after quote
+ ch := s.next()
+
+ if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
+ s.err("literal not terminated")
+ return
+ }
+
+ if ch == '"' && braces == 0 {
+ break
+ }
+
+ // If we're going into a ${} then we can ignore quotes for awhile
+ if braces == 0 && ch == '$' && s.peek() == '{' {
+ braces++
+ s.next()
+ } else if braces > 0 && ch == '{' {
+ braces++
+ }
+ if braces > 0 && ch == '}' {
+ braces--
+ }
+
+ if ch == '\\' {
+ s.scanEscape()
+ }
+ }
+
+ return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+ // http://en.cppreference.com/w/cpp/language/escape
+ ch := s.next() // read character after '\'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+ // nothing to do
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // octal notation
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+ // hexadecimal notation
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.err("illegal char escape")
+ }
+ return ch
+}
+
+// scanDigits scans a rune with the given base up to n times. For example an
+// octal escape such as \123 would result in scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ start := n
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ if ch == eof {
+ // If we see an EOF, we halt any more scanning of digits
+ // immediately.
+ break
+ }
+
+ n--
+ }
+ if n > 0 {
+ s.err("illegal char escape")
+ }
+
+ if n != start {
+ // put the last non-digit char back, but only if we actually
+ // read some digits
+ s.unread()
+ }
+
+ return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+ offs := s.srcPos.Offset - s.lastCharLen
+ ch := s.next()
+ for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
+ ch = s.next()
+ }
+
+ if ch != eof {
+ s.unread() // we got identifier, put back latest char
+ }
+
+ return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+ pos.Offset = s.srcPos.Offset - s.lastCharLen
+ switch {
+ case s.srcPos.Column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.srcPos.Line
+ pos.Column = s.srcPos.Column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ pos.Line = s.srcPos.Line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// err reports a scanning error to the s.Error function. If no Error function
+// is set, it prints the error to os.Stderr by default
+func (s *Scanner) err(msg string) {
+ s.ErrorCount++
+ pos := s.recentPosition()
+
+ if s.Error != nil {
+ s.Error(pos, msg)
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
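
A small sketch of driving the scanner directly (input illustrative): Scan is
called in a loop until it yields an EOF token.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/scanner"
	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	s := scanner.New([]byte(`port = 8080 // listen port`))
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		// e.g. 1:1 IDENT "port", then 1:6 ASSIGN "=", and so on;
		// comments come back as ordinary COMMENT tokens.
		fmt.Printf("%s %s %q\n", tok.Pos, tok.Type, tok.Text)
	}
}
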
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
new file mode 100644
index 00000000..5f981eaa
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
@@ -0,0 +1,241 @@
+package strconv
+
+import (
+ "errors"
+ "unicode/utf8"
+)
+
+// ErrSyntax indicates that a value does not have the right syntax for the target type.
+var ErrSyntax = errors.New("invalid syntax")
+
+// Unquote interprets s as a double-quoted HCL string literal, returning the
+// string value that s quotes. Unlike Go's strconv.Unquote, single-quoted and
+// backquoted literals are rejected, and ${...} interpolation sequences are
+// passed through verbatim rather than being unescaped.
+func Unquote(s string) (t string, err error) {
+ n := len(s)
+ if n < 2 {
+ return "", ErrSyntax
+ }
+ quote := s[0]
+ if quote != s[n-1] {
+ return "", ErrSyntax
+ }
+ s = s[1 : n-1]
+
+ if quote != '"' {
+ return "", ErrSyntax
+ }
+ if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
+ return "", ErrSyntax
+ }
+
+ // Is it trivial? Avoid allocation.
+ if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
+ switch quote {
+ case '"':
+ return s, nil
+ case '\'':
+ r, size := utf8.DecodeRuneInString(s)
+ if size == len(s) && (r != utf8.RuneError || size != 1) {
+ return s, nil
+ }
+ }
+ }
+
+ var runeTmp [utf8.UTFMax]byte
+ buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+ for len(s) > 0 {
+ // If we're starting a '${}' then pass it through without unquoting.
+ // Specifically: we don't unquote any characters within the `${}`
+ // section.
+ if s[0] == '$' && len(s) > 1 && s[1] == '{' {
+ buf = append(buf, '$', '{')
+ s = s[2:]
+
+ // Continue reading until we find the closing brace, copying as-is
+ braces := 1
+ for len(s) > 0 && braces > 0 {
+ r, size := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError {
+ return "", ErrSyntax
+ }
+
+ s = s[size:]
+
+ n := utf8.EncodeRune(runeTmp[:], r)
+ buf = append(buf, runeTmp[:n]...)
+
+ switch r {
+ case '{':
+ braces++
+ case '}':
+ braces--
+ }
+ }
+ if braces != 0 {
+ return "", ErrSyntax
+ }
+ if len(s) == 0 {
+ // If there's no string left, we're done!
+ break
+ } else {
+ // If there's more left, we need to pop back up to the top of the loop
+ // in case there's another interpolation in this string.
+ continue
+ }
+ }
+
+ if s[0] == '\n' {
+ return "", ErrSyntax
+ }
+
+ c, multibyte, ss, err := unquoteChar(s, quote)
+ if err != nil {
+ return "", err
+ }
+ s = ss
+ if c < utf8.RuneSelf || !multibyte {
+ buf = append(buf, byte(c))
+ } else {
+ n := utf8.EncodeRune(runeTmp[:], c)
+ buf = append(buf, runeTmp[:n]...)
+ }
+ if quote == '\'' && len(s) != 0 {
+ // single-quoted must be single character
+ return "", ErrSyntax
+ }
+ }
+ return string(buf), nil
+}
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return true
+ }
+ }
+ return false
+}
+
+func unhex(b byte) (v rune, ok bool) {
+ c := rune(b)
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+ return
+}
+
+func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+ // easy cases
+ switch c := s[0]; {
+ case c == quote && (quote == '\'' || quote == '"'):
+ err = ErrSyntax
+ return
+ case c >= utf8.RuneSelf:
+ r, size := utf8.DecodeRuneInString(s)
+ return r, true, s[size:], nil
+ case c != '\\':
+ return rune(s[0]), false, s[1:], nil
+ }
+
+ // hard case: c is backslash
+ if len(s) <= 1 {
+ err = ErrSyntax
+ return
+ }
+ c := s[1]
+ s = s[2:]
+
+ switch c {
+ case 'a':
+ value = '\a'
+ case 'b':
+ value = '\b'
+ case 'f':
+ value = '\f'
+ case 'n':
+ value = '\n'
+ case 'r':
+ value = '\r'
+ case 't':
+ value = '\t'
+ case 'v':
+ value = '\v'
+ case 'x', 'u', 'U':
+ n := 0
+ switch c {
+ case 'x':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ var v rune
+ if len(s) < n {
+ err = ErrSyntax
+ return
+ }
+ for j := 0; j < n; j++ {
+ x, ok := unhex(s[j])
+ if !ok {
+ err = ErrSyntax
+ return
+ }
+ v = v<<4 | x
+ }
+ s = s[n:]
+ if c == 'x' {
+ // single-byte string, possibly not UTF-8
+ value = v
+ break
+ }
+ if v > utf8.MaxRune {
+ err = ErrSyntax
+ return
+ }
+ value = v
+ multibyte = true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ v := rune(c) - '0'
+ if len(s) < 2 {
+ err = ErrSyntax
+ return
+ }
+ for j := 0; j < 2; j++ { // one digit already; two more
+ x := rune(s[j]) - '0'
+ if x < 0 || x > 7 {
+ err = ErrSyntax
+ return
+ }
+ v = (v << 3) | x
+ }
+ s = s[2:]
+ if v > 255 {
+ err = ErrSyntax
+ return
+ }
+ value = v
+ case '\\':
+ value = '\\'
+ case '\'', '"':
+ if c != quote {
+ err = ErrSyntax
+ return
+ }
+ value = rune(c)
+ default:
+ err = ErrSyntax
+ return
+ }
+ tail = s
+ return
+}
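
A brief sketch of the ${} pass-through behavior described above (input
illustrative): ordinary escapes are decoded while interpolation sequences are
copied through verbatim.

package main

import (
	"fmt"

	hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
)

func main() {
	out, err := hclstrconv.Unquote(`"hello\n${var.name}"`)
	if err != nil {
		panic(err)
	}
	// The \n escape is decoded; the ${...} section is untouched.
	fmt.Printf("%q\n", out) // "hello\n${var.name}"
}
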
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
new file mode 100644
index 00000000..59c1bb72
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+func (p Pos) String() string {
+ s := p.Filename
+ if p.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+ return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+ return u.Offset < p.Offset || u.Line < p.Line
+}
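
A quick sketch exercising the documented String forms:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	fmt.Println(token.Pos{Filename: "main.hcl", Line: 3, Column: 7}) // main.hcl:3:7
	fmt.Println(token.Pos{Line: 3, Column: 7})                       // 3:7
	fmt.Println(token.Pos{Filename: "main.hcl"})                     // main.hcl
	fmt.Println(token.Pos{})                                         // -
}
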
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
new file mode 100644
index 00000000..e37c0664
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
@@ -0,0 +1,219 @@
+// Package token defines constants representing the lexical tokens for HCL
+// (HashiCorp Configuration Language)
+package token
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+ Type Type
+ Pos Pos
+ Text string
+ JSON bool
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+ // Special tokens
+ ILLEGAL Type = iota
+ EOF
+ COMMENT
+
+ identifier_beg
+ IDENT // literals
+ literal_beg
+ NUMBER // 12345
+ FLOAT // 123.45
+ BOOL // true,false
+ STRING // "abc"
+ HEREDOC // <<FOO\nbar\nFOO
+ literal_end
+ identifier_end
+
+ operator_beg
+ LBRACK // [
+ LBRACE // {
+ COMMA // ,
+ PERIOD // .
+
+ RBRACK // ]
+ RBRACE // }
+
+ ASSIGN // =
+ ADD // +
+ SUB // -
+ operator_end
+)
+
+var tokens = [...]string{
+ ILLEGAL: "ILLEGAL",
+
+ EOF: "EOF",
+ COMMENT: "COMMENT",
+
+ IDENT: "IDENT",
+ NUMBER: "NUMBER",
+ FLOAT: "FLOAT",
+ BOOL: "BOOL",
+ STRING: "STRING",
+
+ LBRACK: "LBRACK",
+ LBRACE: "LBRACE",
+ COMMA: "COMMA",
+ PERIOD: "PERIOD",
+ HEREDOC: "HEREDOC",
+
+ RBRACK: "RBRACK",
+ RBRACE: "RBRACE",
+
+ ASSIGN: "ASSIGN",
+ ADD: "ADD",
+ SUB: "SUB",
+}
+
+// String returns the string corresponding to the token type t.
+func (t Type) String() string {
+ s := ""
+ if 0 <= t && t < Type(len(tokens)) {
+ s = tokens[t]
+ }
+ if s == "" {
+ s = "token(" + strconv.Itoa(int(t)) + ")"
+ }
+ return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns a string representation of the token, including its
+// position, type and literal text.
+func (t Token) String() string {
+ return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// Value returns the properly typed value for this token. The type of
+// the returned interface{} is guaranteed based on the Type field.
+//
+// This can only be called for literal types. If it is called for any other
+// type, this will panic.
+func (t Token) Value() interface{} {
+ switch t.Type {
+ case BOOL:
+ if t.Text == "true" {
+ return true
+ } else if t.Text == "false" {
+ return false
+ }
+
+ panic("unknown bool value: " + t.Text)
+ case FLOAT:
+ v, err := strconv.ParseFloat(t.Text, 64)
+ if err != nil {
+ panic(err)
+ }
+
+ return float64(v)
+ case NUMBER:
+ v, err := strconv.ParseInt(t.Text, 0, 64)
+ if err != nil {
+ panic(err)
+ }
+
+ return int64(v)
+ case IDENT:
+ return t.Text
+ case HEREDOC:
+ return unindentHeredoc(t.Text)
+ case STRING:
+ // Determine the Unquote method to use. If it came from JSON,
+ // then we need to use the built-in unquote since we have to
+ // escape interpolations there.
+ f := hclstrconv.Unquote
+ if t.JSON {
+ f = strconv.Unquote
+ }
+
+ // This case occurs if json null is used
+ if t.Text == "" {
+ return ""
+ }
+
+ v, err := f(t.Text)
+ if err != nil {
+ panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
+ }
+
+ return v
+ default:
+ panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
+ }
+}
+
+// unindentHeredoc returns the string content of a HEREDOC when it starts with
+// <<, and the content with the hanging indent removed when it starts with <<-
+// and every line is at least as indented as the terminating marker.
+func unindentHeredoc(heredoc string) string {
+ // We need to find the end of the marker
+ idx := strings.IndexByte(heredoc, '\n')
+ if idx == -1 {
+ panic("heredoc doesn't contain newline")
+ }
+
+ unindent := heredoc[2] == '-'
+
+ // We can optimize if the heredoc isn't marked for indentation
+ if !unindent {
+ return string(heredoc[idx+1 : len(heredoc)-idx+1])
+ }
+
+ // We need to unindent each line based on the indentation level of the marker
+ lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
+ whitespacePrefix := lines[len(lines)-1]
+
+ isIndented := true
+ for _, v := range lines {
+ if strings.HasPrefix(v, whitespacePrefix) {
+ continue
+ }
+
+ isIndented = false
+ break
+ }
+
+ // If all lines are not at least as indented as the terminating mark, return the
+ // heredoc as is, but trim the leading space from the marker on the final line.
+ if !isIndented {
+ return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
+ }
+
+ unindentedLines := make([]string, len(lines))
+ for k, v := range lines {
+ if k == len(lines)-1 {
+ unindentedLines[k] = ""
+ break
+ }
+
+ unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
+ }
+
+ return strings.Join(unindentedLines, "\n")
+}
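
A brief sketch of Value's typed conversions; the tokens are constructed by
hand purely for illustration.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	// NUMBER is parsed with ParseInt base 0, so 0x... and 0... prefixes work.
	n := token.Token{Type: token.NUMBER, Text: "0x2A"}
	fmt.Println(n.Value()) // 42

	b := token.Token{Type: token.BOOL, Text: "true"}
	fmt.Println(b.Value()) // true

	// STRING is unquoted with the HCL-aware Unquote, so ${...} survives.
	s := token.Token{Type: token.STRING, Text: `"web-${var.env}"`}
	fmt.Println(s.Value()) // web-${var.env}
}
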
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/flatten.go b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
new file mode 100644
index 00000000..f652d6fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
@@ -0,0 +1,117 @@
+package parser
+
+import "github.com/hashicorp/hcl/hcl/ast"
+
+// flattenObjects takes an AST node, walks it, and flattens nested object and
+// list values into multi-key object items
+func flattenObjects(node ast.Node) {
+ ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
+ // We only care about lists, because this is what we modify
+ list, ok := n.(*ast.ObjectList)
+ if !ok {
+ return n, true
+ }
+
+ // Rebuild the item list
+ items := make([]*ast.ObjectItem, 0, len(list.Items))
+ frontier := make([]*ast.ObjectItem, len(list.Items))
+ copy(frontier, list.Items)
+ for len(frontier) > 0 {
+ // Pop the current item
+ n := len(frontier)
+ item := frontier[n-1]
+ frontier = frontier[:n-1]
+
+ switch v := item.Val.(type) {
+ case *ast.ObjectType:
+ items, frontier = flattenObjectType(v, item, items, frontier)
+ case *ast.ListType:
+ items, frontier = flattenListType(v, item, items, frontier)
+ default:
+ items = append(items, item)
+ }
+ }
+
+ // Reverse the list since the frontier model runs things backwards
+ for i := len(items)/2 - 1; i >= 0; i-- {
+ opp := len(items) - 1 - i
+ items[i], items[opp] = items[opp], items[i]
+ }
+
+ // Done! Set the original items
+ list.Items = items
+ return n, true
+ })
+}
+
+func flattenListType(
+ ot *ast.ListType,
+ item *ast.ObjectItem,
+ items []*ast.ObjectItem,
+ frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+ // If the list is empty, keep the original list
+ if len(ot.List) == 0 {
+ items = append(items, item)
+ return items, frontier
+ }
+
+ // All the elements of this list must also be objects!
+ for _, subitem := range ot.List {
+ if _, ok := subitem.(*ast.ObjectType); !ok {
+ items = append(items, item)
+ return items, frontier
+ }
+ }
+
+ // Great! We have a match. Go through all the items and flatten
+ for _, elem := range ot.List {
+ // Add it to the frontier so that we can recurse
+ frontier = append(frontier, &ast.ObjectItem{
+ Keys: item.Keys,
+ Assign: item.Assign,
+ Val: elem,
+ LeadComment: item.LeadComment,
+ LineComment: item.LineComment,
+ })
+ }
+
+ return items, frontier
+}
+
+func flattenObjectType(
+ ot *ast.ObjectType,
+ item *ast.ObjectItem,
+ items []*ast.ObjectItem,
+ frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+ // If the list has no items we do not have to flatten anything
+ if ot.List.Items == nil {
+ items = append(items, item)
+ return items, frontier
+ }
+
+ // All the elements of this object must also be objects!
+ for _, subitem := range ot.List.Items {
+ if _, ok := subitem.Val.(*ast.ObjectType); !ok {
+ items = append(items, item)
+ return items, frontier
+ }
+ }
+
+ // Great! We have a match. Go through all the items and flatten
+ for _, subitem := range ot.List.Items {
+ // Copy the new key
+ keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
+ copy(keys, item.Keys)
+ copy(keys[len(item.Keys):], subitem.Keys)
+
+ // Add it to the frontier so that we can recurse
+ frontier = append(frontier, &ast.ObjectItem{
+ Keys: keys,
+ Assign: item.Assign,
+ Val: subitem.Val,
+ LeadComment: item.LeadComment,
+ LineComment: item.LineComment,
+ })
+ }
+
+ return items, frontier
+}
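
An illustrative sketch of the flattening effect, using the Parse entry point
defined in the next file of this change: a nested JSON object becomes a single
multi-key item, mirroring HCL's `variable "region" { ... }` form.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	jsonparser "github.com/hashicorp/hcl/json/parser"
)

func main() {
	src := `{"variable": {"region": {"default": "us-east-1"}}}`
	f, err := jsonparser.Parse([]byte(src))
	if err != nil {
		panic(err)
	}

	// After flattening, the item carries both keys rather than nesting
	// one single-key object inside another.
	for _, item := range f.Node.(*ast.ObjectList).Items {
		fmt.Printf("keys: %d, first: %s\n", len(item.Keys), item.Keys[0].Token.Text)
	}
}
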
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
new file mode 100644
index 00000000..125a5f07
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
@@ -0,0 +1,313 @@
+package parser
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ hcltoken "github.com/hashicorp/hcl/hcl/token"
+ "github.com/hashicorp/hcl/json/scanner"
+ "github.com/hashicorp/hcl/json/token"
+)
+
+type Parser struct {
+ sc *scanner.Scanner
+
+ // Last read token
+ tok token.Token
+ commaPrev token.Token
+
+ enableTrace bool
+ indent int
+ n int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+ return &Parser{
+ sc: scanner.New(src),
+ }
+}
+
+// Parse parses the given source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+ p := newParser(src)
+ return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the given source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+ f := &ast.File{}
+ var err, scerr error
+ p.sc.Error = func(pos token.Pos, msg string) {
+ scerr = fmt.Errorf("%s: %s", pos, msg)
+ }
+
+ // The root must be an object in JSON
+ object, err := p.object()
+ if scerr != nil {
+ return nil, scerr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // We make our final node an object list so it is more HCL compatible
+ f.Node = object.List
+
+ // Flatten it, which finds patterns and turns them into more HCL-like
+ // AST trees.
+ flattenObjects(f.Node)
+
+ return f, nil
+}
+
+func (p *Parser) objectList() (*ast.ObjectList, error) {
+ defer un(trace(p, "ParseObjectList"))
+ node := &ast.ObjectList{}
+
+ for {
+ n, err := p.objectItem()
+ if err == errEofToken {
+ break // we are finished
+ }
+
+ // we don't return a nil node, because the caller might want to use the
+ // already collected items.
+ if err != nil {
+ return node, err
+ }
+
+ node.Add(n)
+
+ // Check for a followup comma. If it isn't a comma, then we're done
+ if tok := p.scan(); tok.Type != token.COMMA {
+ break
+ }
+ }
+
+ return node, nil
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+ defer un(trace(p, "ParseObjectItem"))
+
+ keys, err := p.objectKey()
+ if err != nil {
+ return nil, err
+ }
+
+ o := &ast.ObjectItem{
+ Keys: keys,
+ }
+
+ switch p.tok.Type {
+ case token.COLON:
+ pos := p.tok.Pos
+ o.Assign = hcltoken.Pos{
+ Filename: pos.Filename,
+ Offset: pos.Offset,
+ Line: pos.Line,
+ Column: pos.Column,
+ }
+
+ o.Val, err = p.objectValue()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return o, nil
+}
+
+// objectKey parses an object key and returns an ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+ keyCount := 0
+ keys := make([]*ast.ObjectKey, 0)
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.EOF:
+ return nil, errEofToken
+ case token.STRING:
+ keyCount++
+ keys = append(keys, &ast.ObjectKey{
+ Token: p.tok.HCLToken(),
+ })
+ case token.COLON:
+ // If we have a zero keycount it means that we never got
+ // an object key, i.e. `{ :`. This is a syntax error.
+ if keyCount == 0 {
+ return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+ }
+
+ // Done
+ return keys, nil
+ case token.ILLEGAL:
+ return nil, errors.New("illegal")
+ default:
+ return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+ }
+ }
+}
+
+// objectValue parses any type of value, such as a number, bool, string,
+// object or list.
+func (p *Parser) objectValue() (ast.Node, error) {
+ defer un(trace(p, "ParseObjectValue"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
+ return p.literalType()
+ case token.LBRACE:
+ return p.objectType()
+ case token.LBRACK:
+ return p.listType()
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
+}
+
+// object parses the root value, which in JSON must be an object
+// (i.e. it must start with LBRACE).
+func (p *Parser) object() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseType"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.LBRACE:
+ return p.objectType()
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
+}
+
+// objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseObjectType"))
+
+ // we assume that the currently scanned token is a LBRACE
+ o := &ast.ObjectType{}
+
+ l, err := p.objectList()
+
+ // if we hit RBRACE, we are good to go (it means we parsed all items); if
+ // it's not an RBRACE, it's a syntax error and we just return it.
+ if err != nil && p.tok.Type != token.RBRACE {
+ return nil, err
+ }
+
+ o.List = l
+ return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+ defer un(trace(p, "ParseListType"))
+
+ // we assume that the currently scanned token is a LBRACK
+ l := &ast.ListType{}
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.STRING:
+ node, err := p.literalType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ case token.COMMA:
+ continue
+ case token.LBRACE:
+ node, err := p.objectType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ case token.BOOL:
+ // TODO(arslan) should we support? not supported by HCL yet
+ case token.LBRACK:
+ // TODO(arslan) should we support nested lists? Even though it's
+ // written in README of HCL, it's not a part of the grammar
+ // (not defined in parse.y)
+ case token.RBRACK:
+ // finished
+ return l, nil
+ default:
+ return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
+ }
+
+ }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+ defer un(trace(p, "ParseLiteral"))
+
+ return &ast.LiteralType{
+ Token: p.tok.HCLToken(),
+ }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead.
+func (p *Parser) scan() token.Token {
+ // If we have a token on the buffer, then return it.
+ if p.n != 0 {
+ p.n = 0
+ return p.tok
+ }
+
+ p.tok = p.sc.Scan()
+ return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+ p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+ p.indent--
+ p.printTrace(")")
+}
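
A minimal sketch of the JSON entry point (inputs illustrative): the root must
be a JSON object, and the result is the same HCL AST used by the rest of the
package.

package main

import (
	"fmt"

	jsonparser "github.com/hashicorp/hcl/json/parser"
)

func main() {
	// A non-object root is rejected.
	if _, err := jsonparser.Parse([]byte(`[1, 2, 3]`)); err != nil {
		fmt.Println("err:", err)
	}

	f, err := jsonparser.Parse([]byte(`{"port": 8080}`))
	if err != nil {
		panic(err)
	}
	fmt.Printf("root node: %T\n", f.Node) // *ast.ObjectList
}
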
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
new file mode 100644
index 00000000..dd5c72bb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
@@ -0,0 +1,451 @@
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hcl/json/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+ buf *bytes.Buffer // Source buffer for advancing and scanning
+ src []byte // Source buffer for immutable access
+
+ // Source Position
+ srcPos token.Pos // current position
+ prevPos token.Pos // previous position, used for peek() method
+
+ lastCharLen int // length of last character in bytes
+ lastLineLen int // length of last line in characters (for correct column reporting)
+
+ tokStart int // token text start position
+ tokEnd int // token text end position
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(pos token.Pos, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+ // tokPos is the start position of most recently scanned token; set by
+ // Scan. The Filename field is always left untouched by the Scanner. If
+ // an error is reported (via Error) and Position is invalid, the scanner is
+ // not inside a token.
+ tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+ // even though we accept a src, we read from an io.Reader-compatible type
+ // (*bytes.Buffer). So in the future we might easily change it to a
+ // streaming read.
+ b := bytes.NewBuffer(src)
+ s := &Scanner{
+ buf: b,
+ src: src,
+ }
+
+ // srcPosition always starts with 1
+ s.srcPos.Line = 1
+ return s
+}
+
+// next reads the next rune from the buffered reader. It returns rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+ ch, size, err := s.buf.ReadRune()
+ if err != nil {
+ // advance for error reporting
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ return eof
+ }
+
+ if ch == utf8.RuneError && size == 1 {
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ s.err("illegal UTF-8 encoding")
+ return ch
+ }
+
+ // remember last position
+ s.prevPos = s.srcPos
+
+ s.srcPos.Column++
+ s.lastCharLen = size
+ s.srcPos.Offset += size
+
+ if ch == '\n' {
+ s.srcPos.Line++
+ s.lastLineLen = s.srcPos.Column
+ s.srcPos.Column = 0
+ }
+
+ // debug
+ // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+ return ch
+}
+
+// unread unreads the previously read rune and updates the source position
+func (s *Scanner) unread() {
+ if err := s.buf.UnreadRune(); err != nil {
+ panic(err) // this is user fault, we should catch it
+ }
+ s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+ peek, _, err := s.buf.ReadRune()
+ if err != nil {
+ return eof
+ }
+
+ s.buf.UnreadRune()
+ return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+ ch := s.next()
+
+ // skip white space
+ for isWhitespace(ch) {
+ ch = s.next()
+ }
+
+ var tok token.Type
+
+ // token text markings
+ s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+ // token position: the initial next() moves the offset by one (the size of
+ // the rune, actually), though we are interested in the starting point
+ s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+ if s.srcPos.Column > 0 {
+ // common case: last character was not a '\n'
+ s.tokPos.Line = s.srcPos.Line
+ s.tokPos.Column = s.srcPos.Column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.tokPos.Line = s.srcPos.Line - 1
+ s.tokPos.Column = s.lastLineLen
+ }
+
+ switch {
+ case isLetter(ch):
+ lit := s.scanIdentifier()
+ if lit == "true" || lit == "false" {
+ tok = token.BOOL
+ } else if lit == "null" {
+ tok = token.NULL
+ } else {
+ s.err("illegal char")
+ }
+ case isDecimal(ch):
+ tok = s.scanNumber(ch)
+ default:
+ switch ch {
+ case eof:
+ tok = token.EOF
+ case '"':
+ tok = token.STRING
+ s.scanString()
+ case '.':
+ tok = token.PERIOD
+ ch = s.peek()
+ if isDecimal(ch) {
+ tok = token.FLOAT
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ tok = token.RBRACE
+ case ',':
+ tok = token.COMMA
+ case ':':
+ tok = token.COLON
+ case '-':
+ if isDecimal(s.peek()) {
+ ch := s.next()
+ tok = s.scanNumber(ch)
+ } else {
+ s.err("illegal char")
+ }
+ default:
+ s.err("illegal char: " + string(ch))
+ }
+ }
+
+ // finish token ending
+ s.tokEnd = s.srcPos.Offset
+
+ // create token literal
+ var tokenText string
+ if s.tokStart >= 0 {
+ tokenText = string(s.src[s.tokStart:s.tokEnd])
+ }
+ s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+ return token.Token{
+ Type: tok,
+ Pos: s.tokPos,
+ Text: tokenText,
+ }
+}
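+
+// A typical caller-side scanning loop looks like this (an illustrative
+// sketch, not part of the original source):
+//
+//	s := scanner.New(src)
+//	for tok := s.Scan(); tok.Type != token.EOF; tok = s.Scan() {
+//		fmt.Println(tok)
+//	}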
+
+// scanNumber scans a JSON number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+ zero := ch == '0'
+ pos := s.srcPos
+
+ s.scanMantissa(ch)
+ ch = s.next() // seek forward
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+
+ // A number literal that starts with 0 and has more digits is invalid
+ if zero && pos != s.srcPos {
+ s.err("numbers cannot start with 0")
+ }
+
+ return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the given rune. It returns
+// the next non-decimal rune. It's used to determine whether it's a fraction
+// or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+ scanned := false
+ for isDecimal(ch) {
+ ch = s.next()
+ scanned = true
+ }
+
+ if scanned && ch != eof {
+ s.unread()
+ }
+ return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.peek() // we peek just to see if we can move forward
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+ braces := 0
+ for {
+ // '"' opening already consumed
+ // read character after quote
+ ch := s.next()
+
+ if ch == '\n' || ch < 0 || ch == eof {
+ s.err("literal not terminated")
+ return
+ }
+
+ if ch == '"' {
+ break
+ }
+
+ // If we're going into a ${} then we can ignore quotes for a while
+ if braces == 0 && ch == '$' && s.peek() == '{' {
+ braces++
+ s.next()
+ } else if braces > 0 && ch == '{' {
+ braces++
+ }
+ if braces > 0 && ch == '}' {
+ braces--
+ }
+
+ if ch == '\\' {
+ s.scanEscape()
+ }
+ }
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+ // http://en.cppreference.com/w/cpp/language/escape
+ ch := s.next() // read character after '\'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+ // nothing to do
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // octal notation
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+ // hexadecimal notation
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.err("illegal char escape")
+ }
+ return ch
+}
+
+// scanDigits scans up to n digits in the given base. For example, the octal
+// escape \184 results in the call scanDigits(ch, 8, 3).
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ n--
+ }
+ if n > 0 {
+ s.err("illegal char escape")
+ }
+
+ // we scanned all digits; put the last non-digit char back
+ s.unread()
+ return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+ offs := s.srcPos.Offset - s.lastCharLen
+ ch := s.next()
+ for isLetter(ch) || isDigit(ch) || ch == '-' {
+ ch = s.next()
+ }
+
+ if ch != eof {
+ s.unread() // we got the identifier; put back the latest char
+ }
+
+ return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+ pos.Offset = s.srcPos.Offset - s.lastCharLen
+ switch {
+ case s.srcPos.Column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.srcPos.Line
+ pos.Column = s.srcPos.Column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ pos.Line = s.srcPos.Line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// err reports a scanning error to the s.Error function. If no Error function
+// is set, it prints the error to os.Stderr by default.
+func (s *Scanner) err(msg string) {
+ s.ErrorCount++
+ pos := s.recentPosition()
+
+ if s.Error != nil {
+ s.Error(pos, msg)
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a digit
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644
index 00000000..59c1bb72
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Pos is valid if the line number is > 0.
+type Pos struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+func (p Pos) String() string {
+ s := p.Filename
+ if p.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+ return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+ return u.Offset < p.Offset || u.Line < p.Line
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go
new file mode 100644
index 00000000..95a0c3ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/token.go
@@ -0,0 +1,118 @@
+package token
+
+import (
+ "fmt"
+ "strconv"
+
+ hcltoken "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+ Type Type
+ Pos Pos
+ Text string
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+ // Special tokens
+ ILLEGAL Type = iota
+ EOF
+
+ identifier_beg
+ literal_beg
+ NUMBER // 12345
+ FLOAT // 123.45
+ BOOL // true,false
+ STRING // "abc"
+ NULL // null
+ literal_end
+ identifier_end
+
+ operator_beg
+ LBRACK // [
+ LBRACE // {
+ COMMA // ,
+ PERIOD // .
+ COLON // :
+
+ RBRACK // ]
+ RBRACE // }
+
+ operator_end
+)
+
+var tokens = [...]string{
+ ILLEGAL: "ILLEGAL",
+
+ EOF: "EOF",
+
+ NUMBER: "NUMBER",
+ FLOAT: "FLOAT",
+ BOOL: "BOOL",
+ STRING: "STRING",
+ NULL: "NULL",
+
+ LBRACK: "LBRACK",
+ LBRACE: "LBRACE",
+ COMMA: "COMMA",
+ PERIOD: "PERIOD",
+ COLON: "COLON",
+
+ RBRACK: "RBRACK",
+ RBRACE: "RBRACE",
+}
+
+// String returns the string corresponding to the token type t.
+func (t Type) String() string {
+ s := ""
+ if 0 <= t && t < Type(len(tokens)) {
+ s = tokens[t]
+ }
+ if s == "" {
+ s = "token(" + strconv.Itoa(int(t)) + ")"
+ }
+ return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns a human-readable representation of the token: its
+// position, type, and literal text.
+func (t Token) String() string {
+ return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// HCLToken converts this token to an HCL token.
+//
+// The token type must be a literal type or this will panic.
+func (t Token) HCLToken() hcltoken.Token {
+ switch t.Type {
+ case BOOL:
+ return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
+ case FLOAT:
+ return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
+ case NULL:
+ return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
+ case NUMBER:
+ return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
+ case STRING:
+ return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
+ default:
+ panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go
new file mode 100644
index 00000000..d9993c29
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/lex.go
@@ -0,0 +1,38 @@
+package hcl
+
+import (
+ "unicode"
+ "unicode/utf8"
+)
+
+type lexModeValue byte
+
+const (
+ lexModeUnknown lexModeValue = iota
+ lexModeHcl
+ lexModeJson
+)
+
+// lexMode returns whether we're going to be parsing in JSON
+// mode or HCL mode.
+func lexMode(v []byte) lexModeValue {
+ var (
+ r rune
+ w int
+ offset int
+ )
+
+ for {
+ r, w = utf8.DecodeRune(v[offset:])
+ offset += w
+ if unicode.IsSpace(r) {
+ continue
+ }
+ if r == '{' {
+ return lexModeJson
+ }
+ break
+ }
+
+ return lexModeHcl
+}
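+
+// For example (illustrative): lexMode([]byte(`{"foo": "bar"}`)) returns
+// lexModeJson, while lexMode([]byte(`foo = "bar"`)) returns lexModeHcl.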
diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go
new file mode 100644
index 00000000..1fca53c4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/parse.go
@@ -0,0 +1,39 @@
+package hcl
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ hclParser "github.com/hashicorp/hcl/hcl/parser"
+ jsonParser "github.com/hashicorp/hcl/json/parser"
+)
+
+// ParseBytes accepts a byte slice as input and returns the AST tree.
+//
+// Input can be either JSON or HCL
+func ParseBytes(in []byte) (*ast.File, error) {
+ return parse(in)
+}
+
+// ParseString accepts a string as input and returns the AST tree.
+func ParseString(input string) (*ast.File, error) {
+ return parse([]byte(input))
+}
+
+func parse(in []byte) (*ast.File, error) {
+ switch lexMode(in) {
+ case lexModeHcl:
+ return hclParser.Parse(in)
+ case lexModeJson:
+ return jsonParser.Parse(in)
+ }
+
+ return nil, fmt.Errorf("unknown config format")
+}
+
+// Parse parses the given input and returns the root object.
+//
+// The input format can be either HCL or JSON.
+func Parse(input string) (*ast.File, error) {
+ return parse([]byte(input))
+}
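+
+// For example (an illustrative caller-side sketch):
+//
+//	f, err := hcl.Parse(`foo = "bar"`)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = f // f is the root *ast.File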
diff --git a/vendor/github.com/hashicorp/hil/LICENSE b/vendor/github.com/hashicorp/hil/LICENSE
new file mode 100644
index 00000000..82b4de97
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/LICENSE
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/hil/README.md b/vendor/github.com/hashicorp/hil/README.md
new file mode 100644
index 00000000..186ed251
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/README.md
@@ -0,0 +1,102 @@
+# HIL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hil?status.png)](https://godoc.org/github.com/hashicorp/hil) [![Build Status](https://travis-ci.org/hashicorp/hil.svg?branch=master)](https://travis-ci.org/hashicorp/hil)
+
+HIL (HashiCorp Interpolation Language) is a lightweight embedded language used
+primarily for configuration interpolation. The goal of HIL is to make a simple
+language for interpolations in the various configurations of HashiCorp tools.
+
+HIL is built to interpolate any string, but is in use by HashiCorp primarily
+with [HCL](https://github.com/hashicorp/hcl). HCL is _not required_ in any
+way for use with HIL.
+
+HIL isn't meant to be a general-purpose language. It was built for basic
+configuration interpolations. Therefore, you can't currently write functions,
+have conditionals, set intermediary variables, etc. within HIL itself. It is
+possible some of these may be added later but the right use case must exist.
+
+## Why?
+
+Many of our tools have support for something similar to templates, but
+within the configuration itself. The most prominent requirement was in
+[Terraform](https://github.com/hashicorp/terraform) where we wanted the
+configuration to be able to reference values from elsewhere in the
+configuration. Example:
+
+ foo = "hi ${var.world}"
+
+We originally used a full templating language for this, but found it
+was too heavyweight. Additionally, many full languages required bindings
+to C (and thus the usage of cgo), which we try to avoid to make
+cross-compilation easier. We then moved to very basic regular-expression-based
+string replacement, but the need for basic arithmetic and function calls
+resulted in overly complex regular expressions.
+
+Ultimately, we wrote our own mini-language within Terraform itself. As
+we built other projects such as [Nomad](https://nomadproject.io) and
+[Otto](https://ottoproject.io), the need for basic interpolations arose
+again.
+
+Thus HIL was born. It is extracted from Terraform, cleaned up, and
+better tested for general purpose use.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is given here.
+
+Code begins within `${` and `}`. Outside of this, text is treated
+literally. For example, `foo` is a valid HIL program that is just the
+string "foo", but `foo ${bar}` is an HIL program that is the string "foo "
+concatenated with the value of `bar`. For the remainder of the syntax
+docs, we'll assume you're within `${}`.
+
+ * Identifiers are any text in the format of `[a-zA-Z0-9-.]`. Example
+ identifiers: `foo`, `var.foo`, `foo-bar`.
+
+ * Strings are double quoted and can contain any UTF-8 characters.
+ Example: `"Hello, World"`
+
+ * Numbers are assumed to be base 10. If you prefix a number with 0x,
+   it is treated as hexadecimal. If it is prefixed with 0, it is
+   treated as octal. Numbers can be in scientific notation: "1e10".
+
+ * Unary `-` can be used for negative numbers. Example: `-10` or `-0.2`
+
+ * Boolean values: `true`, `false`
+
+ * The following arithmetic operations are allowed: +, -, *, /, %.
+
+ * Function calls are in the form of `name(arg1, arg2, ...)`. Example:
+ `add(1, 5)`. Arguments can be any valid HIL expression, example:
+ `add(1, var.foo)` or even nested function calls:
+ `add(1, get("some value"))`.
+
+ * Within strings, further interpolations can be opened with `${}`.
+ Example: `"Hello ${nested}"`. A full example including the
+   original `${}` (remember this list assumes we're inside of one
+ already) could be: `foo ${func("hello ${var.foo}")}`.
+
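+## Usage
+
+Putting the pieces together, a minimal Go embedding might look roughly like
+the sketch below. The `hil.Parse`, `hil.Eval`, and `hil.EvalConfig` names
+are assumed from the package's public API; consult the GoDoc of the
+vendored version for the exact signatures.
+
+    input := "Hello ${var.name}!"
+
+    tree, err := hil.Parse(input)
+    if err != nil {
+        // handle the parse error
+    }
+
+    config := &hil.EvalConfig{
+        GlobalScope: &ast.BasicScope{
+            VarMap: map[string]ast.Variable{
+                "var.name": {Type: ast.TypeString, Value: "World"},
+            },
+        },
+    }
+
+    result, err := hil.Eval(tree, config)
+    // on success, result holds the string "Hello World!"
+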
+## Language Changes
+
+We've used this mini-language in Terraform for years. For backwards compatibility
+reasons, we're unlikely to make an incompatible change to the language but
+we're not currently making that promise, either.
+
+The internal API of this project may very well change as we evolve it
+to work with more of our projects. We recommend using some sort of dependency
+management solution with this package.
+
+## Future Changes
+
+The following changes are already planned to be made at some point:
+
+ * Richer types: lists, maps, etc.
+
+ * Convert to a more standard Go parser structure similar to HCL. This
+ will improve our error messaging as well as allow us to have automatic
+ formatting.
+
+ * Allow interpolations to result in more types than just a string. While
+ within the interpolation basic types are honored, the result is always
+ a string.
diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic.go b/vendor/github.com/hashicorp/hil/ast/arithmetic.go
new file mode 100644
index 00000000..94dc24f8
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/arithmetic.go
@@ -0,0 +1,43 @@
+package ast
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Arithmetic represents a node where the result is arithmetic of
+// two or more operands in the order given.
+type Arithmetic struct {
+ Op ArithmeticOp
+ Exprs []Node
+ Posx Pos
+}
+
+func (n *Arithmetic) Accept(v Visitor) Node {
+ for i, expr := range n.Exprs {
+ n.Exprs[i] = expr.Accept(v)
+ }
+
+ return v(n)
+}
+
+func (n *Arithmetic) Pos() Pos {
+ return n.Posx
+}
+
+func (n *Arithmetic) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
+
+func (n *Arithmetic) String() string {
+ var b bytes.Buffer
+ for _, expr := range n.Exprs {
+ b.WriteString(fmt.Sprintf("%s", expr))
+ }
+
+ return b.String()
+}
+
+func (n *Arithmetic) Type(Scope) (Type, error) {
+ return TypeInt, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go
new file mode 100644
index 00000000..18880c60
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go
@@ -0,0 +1,24 @@
+package ast
+
+// ArithmeticOp is the operation to use for the math.
+type ArithmeticOp int
+
+const (
+ ArithmeticOpInvalid ArithmeticOp = 0
+
+ ArithmeticOpAdd ArithmeticOp = iota
+ ArithmeticOpSub
+ ArithmeticOpMul
+ ArithmeticOpDiv
+ ArithmeticOpMod
+
+ ArithmeticOpLogicalAnd
+ ArithmeticOpLogicalOr
+
+ ArithmeticOpEqual
+ ArithmeticOpNotEqual
+ ArithmeticOpLessThan
+ ArithmeticOpLessThanOrEqual
+ ArithmeticOpGreaterThan
+ ArithmeticOpGreaterThanOrEqual
+)
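+
+// Note that ArithmeticOpAdd is declared with iota in the second spec of the
+// const block, so it takes the value 1 and the following operators count
+// upward from there; only ArithmeticOpInvalid is 0.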
diff --git a/vendor/github.com/hashicorp/hil/ast/ast.go b/vendor/github.com/hashicorp/hil/ast/ast.go
new file mode 100644
index 00000000..c6350f8b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/ast.go
@@ -0,0 +1,99 @@
+package ast
+
+import (
+ "fmt"
+)
+
+// Node is the interface that all AST nodes must implement.
+type Node interface {
+ // Accept is called to dispatch to the visitors. It must return the
+ // resulting Node (which might be different in an AST transform).
+ Accept(Visitor) Node
+
+ // Pos returns the position of this node in some source.
+ Pos() Pos
+
+ // Type returns the type of this node for the given context.
+ Type(Scope) (Type, error)
+}
+
+// Pos is the starting position of an AST node
+type Pos struct {
+ Column, Line int // Column/Line number, starting at 1
+ Filename string // Optional source filename, if known
+}
+
+func (p Pos) String() string {
+ if p.Filename == "" {
+ return fmt.Sprintf("%d:%d", p.Line, p.Column)
+ } else {
+ return fmt.Sprintf("%s:%d:%d", p.Filename, p.Line, p.Column)
+ }
+}
+
+// InitPos is an initial position value. This should be used as
+// the starting position (presets the column and line to 1).
+var InitPos = Pos{Column: 1, Line: 1}
+
+// Visitors are just implementations of this function.
+//
+// The function must return the Node to replace this node with. "nil" is
+// _not_ a valid return value. If there is no replacement, the original node
+// should be returned. We build this replacement directly into the visitor
+// pattern since AST transformations are a common and useful tool and
+// building it into the AST itself makes it required for future Node
+// implementations and very easy to do.
+//
+// Note that this isn't a true implementation of the visitor pattern, which
+// generally requires proper type dispatch on the function. However,
+// implementing this basic visitor pattern style is still very useful even
+// if you have to type switch.
+type Visitor func(Node) Node
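+
+// For example, a visitor that replaces every variable access with a fixed
+// string literal could be written as (an illustrative sketch):
+//
+//	var replaceVars Visitor = func(n Node) Node {
+//		if va, ok := n.(*VariableAccess); ok {
+//			return &LiteralNode{Value: "x", Typex: TypeString, Posx: va.Pos()}
+//		}
+//		return n
+//	}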
+
+//go:generate stringer -type=Type
+
+// Type is the type of any value.
+type Type uint32
+
+const (
+ TypeInvalid Type = 0
+ TypeAny Type = 1 << iota
+ TypeBool
+ TypeString
+ TypeInt
+ TypeFloat
+ TypeList
+ TypeMap
+
+ // This is a special type used by Terraform to mark "unknown" values.
+ // It is impossible for this type to be introduced into your HIL programs
+ // unless you explicitly set a variable to this value. In that case,
+ // any operation including the variable will return "TypeUnknown" as the
+ // type.
+ TypeUnknown
+)
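+
+// The values above (other than TypeInvalid) are distinct powers of two, so
+// in principle a set of types can be represented as a bitmask.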
+
+func (t Type) Printable() string {
+ switch t {
+ case TypeInvalid:
+ return "invalid type"
+ case TypeAny:
+ return "any type"
+ case TypeBool:
+ return "type bool"
+ case TypeString:
+ return "type string"
+ case TypeInt:
+ return "type int"
+ case TypeFloat:
+ return "type float"
+ case TypeList:
+ return "type list"
+ case TypeMap:
+ return "type map"
+ case TypeUnknown:
+ return "type unknown"
+ default:
+ return "unknown type"
+ }
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/call.go b/vendor/github.com/hashicorp/hil/ast/call.go
new file mode 100644
index 00000000..05570110
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/call.go
@@ -0,0 +1,47 @@
+package ast
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Call represents a function call.
+type Call struct {
+ Func string
+ Args []Node
+ Posx Pos
+}
+
+func (n *Call) Accept(v Visitor) Node {
+ for i, a := range n.Args {
+ n.Args[i] = a.Accept(v)
+ }
+
+ return v(n)
+}
+
+func (n *Call) Pos() Pos {
+ return n.Posx
+}
+
+func (n *Call) String() string {
+ args := make([]string, len(n.Args))
+ for i, arg := range n.Args {
+ args[i] = fmt.Sprintf("%s", arg)
+ }
+
+ return fmt.Sprintf("Call(%s, %s)", n.Func, strings.Join(args, ", "))
+}
+
+func (n *Call) Type(s Scope) (Type, error) {
+ f, ok := s.LookupFunc(n.Func)
+ if !ok {
+ return TypeInvalid, fmt.Errorf("unknown function: %s", n.Func)
+ }
+
+ return f.ReturnType, nil
+}
+
+func (n *Call) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/conditional.go b/vendor/github.com/hashicorp/hil/ast/conditional.go
new file mode 100644
index 00000000..be48f89d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/conditional.go
@@ -0,0 +1,36 @@
+package ast
+
+import (
+ "fmt"
+)
+
+type Conditional struct {
+ CondExpr Node
+ TrueExpr Node
+ FalseExpr Node
+ Posx Pos
+}
+
+// Accept passes the given visitor to the child nodes in this order:
+// CondExpr, TrueExpr, FalseExpr. It then finally passes itself to the visitor.
+func (n *Conditional) Accept(v Visitor) Node {
+ n.CondExpr = n.CondExpr.Accept(v)
+ n.TrueExpr = n.TrueExpr.Accept(v)
+ n.FalseExpr = n.FalseExpr.Accept(v)
+
+ return v(n)
+}
+
+func (n *Conditional) Pos() Pos {
+ return n.Posx
+}
+
+func (n *Conditional) Type(Scope) (Type, error) {
+ // This is not actually a useful value; the type checker ignores
+ // this function when analyzing conditionals, just as with Arithmetic.
+ return TypeInt, nil
+}
+
+func (n *Conditional) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/index.go b/vendor/github.com/hashicorp/hil/ast/index.go
new file mode 100644
index 00000000..860c25fd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/index.go
@@ -0,0 +1,76 @@
+package ast
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Index represents an indexing operation into another data structure
+type Index struct {
+ Target Node
+ Key Node
+ Posx Pos
+}
+
+func (n *Index) Accept(v Visitor) Node {
+ n.Target = n.Target.Accept(v)
+ n.Key = n.Key.Accept(v)
+ return v(n)
+}
+
+func (n *Index) Pos() Pos {
+ return n.Posx
+}
+
+func (n *Index) String() string {
+ return fmt.Sprintf("Index(%s, %s)", n.Target, n.Key)
+}
+
+func (n *Index) Type(s Scope) (Type, error) {
+ variableAccess, ok := n.Target.(*VariableAccess)
+ if !ok {
+ return TypeInvalid, fmt.Errorf("target is not a variable")
+ }
+
+ variable, ok := s.LookupVar(variableAccess.Name)
+ if !ok {
+ return TypeInvalid, fmt.Errorf("unknown variable accessed: %s", variableAccess.Name)
+ }
+
+ switch variable.Type {
+ case TypeList:
+ return n.typeList(variable, variableAccess.Name)
+ case TypeMap:
+ return n.typeMap(variable, variableAccess.Name)
+ default:
+ return TypeInvalid, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type)
+ }
+}
+
+func (n *Index) typeList(variable Variable, variableName string) (Type, error) {
+ // We assume type checking has already determined that this is a list
+ list := variable.Value.([]Variable)
+
+ return VariableListElementTypesAreHomogenous(variableName, list)
+}
+
+func (n *Index) typeMap(variable Variable, variableName string) (Type, error) {
+ // We assume type checking has already determined that this is a map
+ vmap := variable.Value.(map[string]Variable)
+
+ return VariableMapValueTypesAreHomogenous(variableName, vmap)
+}
+
+func reportTypes(typesFound map[Type]struct{}) string {
+ stringTypes := make([]string, len(typesFound))
+ i := 0
+ for k := range typesFound {
+ stringTypes[i] = k.String()
+ i++
+ }
+ return strings.Join(stringTypes, ", ")
+}
+
+func (n *Index) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/literal.go b/vendor/github.com/hashicorp/hil/ast/literal.go
new file mode 100644
index 00000000..da6014fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/literal.go
@@ -0,0 +1,88 @@
+package ast
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// LiteralNode represents a single literal value, such as "foo" or
+// 42 or 3.14159. Based on the Type, the Value can be safely cast.
+type LiteralNode struct {
+ Value interface{}
+ Typex Type
+ Posx Pos
+}
+
+// NewLiteralNode returns a new literal node representing the given
+// literal Go value, which must correspond to one of the primitive types
+// supported by HIL. Lists and maps cannot currently be constructed via
+// this function.
+//
+// If an inappropriately-typed value is provided, this function will
+// return an error. The main intended use of this function is to produce
+// "synthetic" literals from constants in code, where the value type is
+// well known at compile time. To easily store these in global variables,
+// see also MustNewLiteralNode.
+func NewLiteralNode(value interface{}, pos Pos) (*LiteralNode, error) {
+ goType := reflect.TypeOf(value)
+ var hilType Type
+
+ switch goType.Kind() {
+ case reflect.Bool:
+ hilType = TypeBool
+ case reflect.Int:
+ hilType = TypeInt
+ case reflect.Float64:
+ hilType = TypeFloat
+ case reflect.String:
+ hilType = TypeString
+ default:
+ return nil, fmt.Errorf("unsupported literal node type: %T", value)
+ }
+
+ return &LiteralNode{
+ Value: value,
+ Typex: hilType,
+ Posx: pos,
+ }, nil
+}
+
+// MustNewLiteralNode wraps NewLiteralNode and panics if an error is
+// returned, thus allowing valid literal nodes to be easily assigned to
+// global variables.
+func MustNewLiteralNode(value interface{}, pos Pos) *LiteralNode {
+ node, err := NewLiteralNode(value, pos)
+ if err != nil {
+ panic(err)
+ }
+ return node
+}
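+
+// For example (illustrative):
+//
+//	var defaultGreeting = MustNewLiteralNode("hello", InitPos)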
+
+func (n *LiteralNode) Accept(v Visitor) Node {
+ return v(n)
+}
+
+func (n *LiteralNode) Pos() Pos {
+ return n.Posx
+}
+
+func (n *LiteralNode) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
+
+func (n *LiteralNode) String() string {
+ return fmt.Sprintf("Literal(%s, %v)", n.Typex, n.Value)
+}
+
+func (n *LiteralNode) Type(Scope) (Type, error) {
+ return n.Typex, nil
+}
+
+// IsUnknown returns true either if the node's value is itself unknown
+// or if it is a collection containing any unknown elements, deeply.
+func (n *LiteralNode) IsUnknown() bool {
+ return IsUnknown(Variable{
+ Type: n.Typex,
+ Value: n.Value,
+ })
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/output.go b/vendor/github.com/hashicorp/hil/ast/output.go
new file mode 100644
index 00000000..1e27f970
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/output.go
@@ -0,0 +1,78 @@
+package ast
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Output represents the root node of all interpolation evaluations. If the
+// output only has one expression which is either a TypeList or TypeMap, the
+// Output can be type-asserted to []interface{} or map[string]interface{}
+// respectively. Otherwise the Output evaluates as a string, and concatenates
+// the evaluation of each expression.
+type Output struct {
+ Exprs []Node
+ Posx Pos
+}
+
+func (n *Output) Accept(v Visitor) Node {
+ for i, expr := range n.Exprs {
+ n.Exprs[i] = expr.Accept(v)
+ }
+
+ return v(n)
+}
+
+func (n *Output) Pos() Pos {
+ return n.Posx
+}
+
+func (n *Output) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
+
+func (n *Output) String() string {
+ var b bytes.Buffer
+ for _, expr := range n.Exprs {
+ b.WriteString(fmt.Sprintf("%s", expr))
+ }
+
+ return b.String()
+}
+
+func (n *Output) Type(s Scope) (Type, error) {
+ // Special case no expressions for backward compatibility
+ if len(n.Exprs) == 0 {
+ return TypeString, nil
+ }
+
+ // Special case a single expression of types list or map
+ if len(n.Exprs) == 1 {
+ exprType, err := n.Exprs[0].Type(s)
+ if err != nil {
+ return TypeInvalid, err
+ }
+ switch exprType {
+ case TypeList:
+ return TypeList, nil
+ case TypeMap:
+ return TypeMap, nil
+ }
+ }
+
+ // Otherwise ensure all our expressions are strings
+ for index, expr := range n.Exprs {
+ exprType, err := expr.Type(s)
+ if err != nil {
+ return TypeInvalid, err
+ }
+ // We only look for things we know we can't coerce with an implicit conversion func
+ if exprType == TypeList || exprType == TypeMap {
+ return TypeInvalid, fmt.Errorf(
+ "multi-expression HIL outputs may only have string inputs: %d is type %s",
+ index, exprType)
+ }
+ }
+
+ return TypeString, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/scope.go b/vendor/github.com/hashicorp/hil/ast/scope.go
new file mode 100644
index 00000000..7a975d99
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/scope.go
@@ -0,0 +1,90 @@
+package ast
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Scope is the interface used to look up variables and functions while
+// evaluating. How these functions/variables are defined are up to the caller.
+type Scope interface {
+ LookupFunc(string) (Function, bool)
+ LookupVar(string) (Variable, bool)
+}
+
+// Variable is a variable value for execution given as input to the engine.
+// It records the value of a variable along with its type.
+type Variable struct {
+ Value interface{}
+ Type Type
+}
+
+// NewVariable creates a new Variable for the given value. This will
+// attempt to infer the correct type. If it can't, an error will be returned.
+func NewVariable(v interface{}) (result Variable, err error) {
+ switch v := reflect.ValueOf(v); v.Kind() {
+ case reflect.String:
+ result.Type = TypeString
+ default:
+ err = fmt.Errorf("Unknown type: %s", v.Kind())
+ }
+
+ result.Value = v
+ return
+}
+
+// String implements Stringer on Variable, displaying the type and value
+// of the Variable.
+func (v Variable) String() string {
+ return fmt.Sprintf("{Variable (%s): %+v}", v.Type, v.Value)
+}
+
+// Function defines a function that can be executed by the engine.
+// The type checker will validate that the proper types will be called
+// to the callback.
+type Function struct {
+ // ArgTypes is the list of types in argument order. These are the
+ // required arguments.
+ //
+ // ReturnType is the type of the returned value. The Callback MUST
+ // return this type.
+ ArgTypes []Type
+ ReturnType Type
+
+ // Variadic, if true, says that this function is variadic, meaning
+ // it takes a variable number of arguments. In this case, the
+ // VariadicType must be set.
+ Variadic bool
+ VariadicType Type
+
+ // Callback is the function invoked when this function is called. The
+ // argument types are guaranteed by the type checker to match the spec
+ // above. The length of args is strictly == len(ArgTypes) unless Variadic
+ // is true, in which case it's >= len(ArgTypes).
+ Callback func([]interface{}) (interface{}, error)
+}
+
+// BasicScope is a simple scope that looks up variables and functions
+// using a map.
+type BasicScope struct {
+ FuncMap map[string]Function
+ VarMap map[string]Variable
+}
+
+func (s *BasicScope) LookupFunc(n string) (Function, bool) {
+ if s == nil {
+ return Function{}, false
+ }
+
+ v, ok := s.FuncMap[n]
+ return v, ok
+}
+
+func (s *BasicScope) LookupVar(n string) (Variable, bool) {
+ if s == nil {
+ return Variable{}, false
+ }
+
+ v, ok := s.VarMap[n]
+ return v, ok
+}
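+
+// As an illustrative sketch, a scope exposing one function and one variable
+// could be built like this:
+//
+//	scope := &BasicScope{
+//		FuncMap: map[string]Function{
+//			"add": {
+//				ArgTypes:   []Type{TypeInt, TypeInt},
+//				ReturnType: TypeInt,
+//				Callback: func(args []interface{}) (interface{}, error) {
+//					return args[0].(int) + args[1].(int), nil
+//				},
+//			},
+//		},
+//		VarMap: map[string]Variable{
+//			"var.foo": {Type: TypeString, Value: "bar"},
+//		},
+//	}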
diff --git a/vendor/github.com/hashicorp/hil/ast/stack.go b/vendor/github.com/hashicorp/hil/ast/stack.go
new file mode 100644
index 00000000..bd2bc157
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/stack.go
@@ -0,0 +1,25 @@
+package ast
+
+// Stack is a stack of Node.
+type Stack struct {
+ stack []Node
+}
+
+func (s *Stack) Len() int {
+ return len(s.stack)
+}
+
+func (s *Stack) Push(n Node) {
+ s.stack = append(s.stack, n)
+}
+
+func (s *Stack) Pop() Node {
+ x := s.stack[len(s.stack)-1]
+ s.stack[len(s.stack)-1] = nil
+ s.stack = s.stack[:len(s.stack)-1]
+ return x
+}
+
+func (s *Stack) Reset() {
+ s.stack = nil
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/type_string.go b/vendor/github.com/hashicorp/hil/ast/type_string.go
new file mode 100644
index 00000000..1f51a98d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/type_string.go
@@ -0,0 +1,54 @@
+// Code generated by "stringer -type=Type"; DO NOT EDIT
+
+package ast
+
+import "fmt"
+
+const (
+ _Type_name_0 = "TypeInvalid"
+ _Type_name_1 = "TypeAny"
+ _Type_name_2 = "TypeBool"
+ _Type_name_3 = "TypeString"
+ _Type_name_4 = "TypeInt"
+ _Type_name_5 = "TypeFloat"
+ _Type_name_6 = "TypeList"
+ _Type_name_7 = "TypeMap"
+ _Type_name_8 = "TypeUnknown"
+)
+
+var (
+ _Type_index_0 = [...]uint8{0, 11}
+ _Type_index_1 = [...]uint8{0, 7}
+ _Type_index_2 = [...]uint8{0, 8}
+ _Type_index_3 = [...]uint8{0, 10}
+ _Type_index_4 = [...]uint8{0, 7}
+ _Type_index_5 = [...]uint8{0, 9}
+ _Type_index_6 = [...]uint8{0, 8}
+ _Type_index_7 = [...]uint8{0, 7}
+ _Type_index_8 = [...]uint8{0, 11}
+)
+
+func (i Type) String() string {
+ switch {
+ case i == 0:
+ return _Type_name_0
+ case i == 2:
+ return _Type_name_1
+ case i == 4:
+ return _Type_name_2
+ case i == 8:
+ return _Type_name_3
+ case i == 16:
+ return _Type_name_4
+ case i == 32:
+ return _Type_name_5
+ case i == 64:
+ return _Type_name_6
+ case i == 128:
+ return _Type_name_7
+ case i == 256:
+ return _Type_name_8
+ default:
+ return fmt.Sprintf("Type(%d)", i)
+ }
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/unknown.go b/vendor/github.com/hashicorp/hil/ast/unknown.go
new file mode 100644
index 00000000..d6ddaecc
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/unknown.go
@@ -0,0 +1,30 @@
+package ast
+
+// IsUnknown reports whether a variable is unknown or contains any value
+// that is unknown. This will recurse into lists and maps and so on.
+func IsUnknown(v Variable) bool {
+ // If it is unknown itself, return true
+ if v.Type == TypeUnknown {
+ return true
+ }
+
+ // If it is a container type, check the values
+ switch v.Type {
+ case TypeList:
+ for _, el := range v.Value.([]Variable) {
+ if IsUnknown(el) {
+ return true
+ }
+ }
+ case TypeMap:
+ for _, el := range v.Value.(map[string]Variable) {
+ if IsUnknown(el) {
+ return true
+ }
+ }
+ default:
+ }
+
+ // Not a container type, or it survived the above checks
+ return false
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/variable_access.go b/vendor/github.com/hashicorp/hil/ast/variable_access.go
new file mode 100644
index 00000000..4c1362d7
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/variable_access.go
@@ -0,0 +1,36 @@
+package ast
+
+import (
+ "fmt"
+)
+
+// VariableAccess represents a variable access.
+type VariableAccess struct {
+ Name string
+ Posx Pos
+}
+
+func (n *VariableAccess) Accept(v Visitor) Node {
+ return v(n)
+}
+
+func (n *VariableAccess) Pos() Pos {
+ return n.Posx
+}
+
+func (n *VariableAccess) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
+
+func (n *VariableAccess) String() string {
+ return fmt.Sprintf("Variable(%s)", n.Name)
+}
+
+func (n *VariableAccess) Type(s Scope) (Type, error) {
+ v, ok := s.LookupVar(n.Name)
+ if !ok {
+ return TypeInvalid, fmt.Errorf("unknown variable: %s", n.Name)
+ }
+
+ return v.Type, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/variables_helper.go b/vendor/github.com/hashicorp/hil/ast/variables_helper.go
new file mode 100644
index 00000000..06bd18de
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/variables_helper.go
@@ -0,0 +1,63 @@
+package ast
+
+import "fmt"
+
+func VariableListElementTypesAreHomogenous(variableName string, list []Variable) (Type, error) {
+ if len(list) == 0 {
+ return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName)
+ }
+
+ elemType := TypeUnknown
+ for _, v := range list {
+ if v.Type == TypeUnknown {
+ continue
+ }
+
+ if elemType == TypeUnknown {
+ elemType = v.Type
+ continue
+ }
+
+ if v.Type != elemType {
+ return TypeInvalid, fmt.Errorf(
+ "list %q does not have homogenous types. found %s and then %s",
+ variableName,
+ elemType, v.Type,
+ )
+ }
+
+ elemType = v.Type
+ }
+
+ return elemType, nil
+}
+
+func VariableMapValueTypesAreHomogenous(variableName string, vmap map[string]Variable) (Type, error) {
+ if len(vmap) == 0 {
+ return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName)
+ }
+
+ elemType := TypeUnknown
+ for _, v := range vmap {
+ if v.Type == TypeUnknown {
+ continue
+ }
+
+ if elemType == TypeUnknown {
+ elemType = v.Type
+ continue
+ }
+
+ if v.Type != elemType {
+ return TypeInvalid, fmt.Errorf(
+ "map %q does not have homogenous types. found %s and then %s",
+ variableName,
+ elemType, v.Type,
+ )
+ }
+
+ elemType = v.Type
+ }
+
+ return elemType, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/builtins.go b/vendor/github.com/hashicorp/hil/builtins.go
new file mode 100644
index 00000000..909c788a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/builtins.go
@@ -0,0 +1,331 @@
+package hil
+
+import (
+ "errors"
+ "strconv"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+// NOTE: All builtins are tested in engine_test.go
+
+func registerBuiltins(scope *ast.BasicScope) *ast.BasicScope {
+ if scope == nil {
+ scope = new(ast.BasicScope)
+ }
+ if scope.FuncMap == nil {
+ scope.FuncMap = make(map[string]ast.Function)
+ }
+
+ // Implicit conversions
+ scope.FuncMap["__builtin_BoolToString"] = builtinBoolToString()
+ scope.FuncMap["__builtin_FloatToInt"] = builtinFloatToInt()
+ scope.FuncMap["__builtin_FloatToString"] = builtinFloatToString()
+ scope.FuncMap["__builtin_IntToFloat"] = builtinIntToFloat()
+ scope.FuncMap["__builtin_IntToString"] = builtinIntToString()
+ scope.FuncMap["__builtin_StringToInt"] = builtinStringToInt()
+ scope.FuncMap["__builtin_StringToFloat"] = builtinStringToFloat()
+ scope.FuncMap["__builtin_StringToBool"] = builtinStringToBool()
+
+ // Math operations
+ scope.FuncMap["__builtin_IntMath"] = builtinIntMath()
+ scope.FuncMap["__builtin_FloatMath"] = builtinFloatMath()
+ scope.FuncMap["__builtin_BoolCompare"] = builtinBoolCompare()
+ scope.FuncMap["__builtin_FloatCompare"] = builtinFloatCompare()
+ scope.FuncMap["__builtin_IntCompare"] = builtinIntCompare()
+ scope.FuncMap["__builtin_StringCompare"] = builtinStringCompare()
+ scope.FuncMap["__builtin_Logical"] = builtinLogical()
+ return scope
+}
+
+func builtinFloatMath() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt},
+ Variadic: true,
+ VariadicType: ast.TypeFloat,
+ ReturnType: ast.TypeFloat,
+ Callback: func(args []interface{}) (interface{}, error) {
+ op := args[0].(ast.ArithmeticOp)
+ result := args[1].(float64)
+ for _, raw := range args[2:] {
+ arg := raw.(float64)
+ switch op {
+ case ast.ArithmeticOpAdd:
+ result += arg
+ case ast.ArithmeticOpSub:
+ result -= arg
+ case ast.ArithmeticOpMul:
+ result *= arg
+ case ast.ArithmeticOpDiv:
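+ // Unlike integer division in builtinIntMath below, float division
+ // by zero does not panic in Go; it yields +Inf or -Inf, so no
+ // explicit guard is needed here.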
+ result /= arg
+ }
+ }
+
+ return result, nil
+ },
+ }
+}
+
+func builtinIntMath() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt},
+ Variadic: true,
+ VariadicType: ast.TypeInt,
+ ReturnType: ast.TypeInt,
+ Callback: func(args []interface{}) (interface{}, error) {
+ op := args[0].(ast.ArithmeticOp)
+ result := args[1].(int)
+ for _, raw := range args[2:] {
+ arg := raw.(int)
+ switch op {
+ case ast.ArithmeticOpAdd:
+ result += arg
+ case ast.ArithmeticOpSub:
+ result -= arg
+ case ast.ArithmeticOpMul:
+ result *= arg
+ case ast.ArithmeticOpDiv:
+ if arg == 0 {
+ return nil, errors.New("divide by zero")
+ }
+
+ result /= arg
+ case ast.ArithmeticOpMod:
+ if arg == 0 {
+ return nil, errors.New("divide by zero")
+ }
+
+ result = result % arg
+ }
+ }
+
+ return result, nil
+ },
+ }
+}
+
+func builtinBoolCompare() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt, ast.TypeBool, ast.TypeBool},
+ Variadic: false,
+ ReturnType: ast.TypeBool,
+ Callback: func(args []interface{}) (interface{}, error) {
+ op := args[0].(ast.ArithmeticOp)
+ lhs := args[1].(bool)
+ rhs := args[2].(bool)
+
+ switch op {
+ case ast.ArithmeticOpEqual:
+ return lhs == rhs, nil
+ case ast.ArithmeticOpNotEqual:
+ return lhs != rhs, nil
+ default:
+ return nil, errors.New("invalid comparison operation")
+ }
+ },
+ }
+}
+
+func builtinFloatCompare() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt, ast.TypeFloat, ast.TypeFloat},
+ Variadic: false,
+ ReturnType: ast.TypeBool,
+ Callback: func(args []interface{}) (interface{}, error) {
+ op := args[0].(ast.ArithmeticOp)
+ lhs := args[1].(float64)
+ rhs := args[2].(float64)
+
+ switch op {
+ case ast.ArithmeticOpEqual:
+ return lhs == rhs, nil
+ case ast.ArithmeticOpNotEqual:
+ return lhs != rhs, nil
+ case ast.ArithmeticOpLessThan:
+ return lhs < rhs, nil
+ case ast.ArithmeticOpLessThanOrEqual:
+ return lhs <= rhs, nil
+ case ast.ArithmeticOpGreaterThan:
+ return lhs > rhs, nil
+ case ast.ArithmeticOpGreaterThanOrEqual:
+ return lhs >= rhs, nil
+ default:
+ return nil, errors.New("invalid comparison operation")
+ }
+ },
+ }
+}
+
+func builtinIntCompare() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt, ast.TypeInt, ast.TypeInt},
+ Variadic: false,
+ ReturnType: ast.TypeBool,
+ Callback: func(args []interface{}) (interface{}, error) {
+ op := args[0].(ast.ArithmeticOp)
+ lhs := args[1].(int)
+ rhs := args[2].(int)
+
+ switch op {
+ case ast.ArithmeticOpEqual:
+ return lhs == rhs, nil
+ case ast.ArithmeticOpNotEqual:
+ return lhs != rhs, nil
+ case ast.ArithmeticOpLessThan:
+ return lhs < rhs, nil
+ case ast.ArithmeticOpLessThanOrEqual:
+ return lhs <= rhs, nil
+ case ast.ArithmeticOpGreaterThan:
+ return lhs > rhs, nil
+ case ast.ArithmeticOpGreaterThanOrEqual:
+ return lhs >= rhs, nil
+ default:
+ return nil, errors.New("invalid comparison operation")
+ }
+ },
+ }
+}
+
+func builtinStringCompare() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt, ast.TypeString, ast.TypeString},
+ Variadic: false,
+ ReturnType: ast.TypeBool,
+ Callback: func(args []interface{}) (interface{}, error) {
+ op := args[0].(ast.ArithmeticOp)
+ lhs := args[1].(string)
+ rhs := args[2].(string)
+
+ switch op {
+ case ast.ArithmeticOpEqual:
+ return lhs == rhs, nil
+ case ast.ArithmeticOpNotEqual:
+ return lhs != rhs, nil
+ default:
+ return nil, errors.New("invalid comparison operation")
+ }
+ },
+ }
+}
+
+func builtinLogical() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt},
+ Variadic: true,
+ VariadicType: ast.TypeBool,
+ ReturnType: ast.TypeBool,
+ Callback: func(args []interface{}) (interface{}, error) {
+ op := args[0].(ast.ArithmeticOp)
+ result := args[1].(bool)
+ for _, raw := range args[2:] {
+ arg := raw.(bool)
+ switch op {
+ case ast.ArithmeticOpLogicalOr:
+ result = result || arg
+ case ast.ArithmeticOpLogicalAnd:
+ result = result && arg
+ default:
+ return nil, errors.New("invalid logical operator")
+ }
+ }
+
+ return result, nil
+ },
+ }
+}
+
+func builtinFloatToInt() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeFloat},
+ ReturnType: ast.TypeInt,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return int(args[0].(float64)), nil
+ },
+ }
+}
+
+func builtinFloatToString() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeFloat},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return strconv.FormatFloat(
+ args[0].(float64), 'g', -1, 64), nil
+ },
+ }
+}
+
+func builtinIntToFloat() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt},
+ ReturnType: ast.TypeFloat,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return float64(args[0].(int)), nil
+ },
+ }
+}
+
+func builtinIntToString() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return strconv.FormatInt(int64(args[0].(int)), 10), nil
+ },
+ }
+}
+
+func builtinStringToInt() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeInt,
+ Callback: func(args []interface{}) (interface{}, error) {
+ v, err := strconv.ParseInt(args[0].(string), 0, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ return int(v), nil
+ },
+ }
+}
+
+func builtinStringToFloat() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeFloat,
+ Callback: func(args []interface{}) (interface{}, error) {
+ v, err := strconv.ParseFloat(args[0].(string), 64)
+ if err != nil {
+ return nil, err
+ }
+
+ return v, nil
+ },
+ }
+}
+
+func builtinBoolToString() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeBool},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return strconv.FormatBool(args[0].(bool)), nil
+ },
+ }
+}
+
+func builtinStringToBool() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeBool,
+ Callback: func(args []interface{}) (interface{}, error) {
+ v, err := strconv.ParseBool(args[0].(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return v, nil
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/hil/check_identifier.go b/vendor/github.com/hashicorp/hil/check_identifier.go
new file mode 100644
index 00000000..474f5058
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/check_identifier.go
@@ -0,0 +1,88 @@
+package hil
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+// IdentifierCheck is a SemanticCheck that checks that all identifiers
+// resolve properly and that the right number of arguments are passed
+// to functions.
+type IdentifierCheck struct {
+ Scope ast.Scope
+
+ err error
+ lock sync.Mutex
+}
+
+func (c *IdentifierCheck) Visit(root ast.Node) error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ defer c.reset()
+ root.Accept(c.visit)
+ return c.err
+}
+
+func (c *IdentifierCheck) visit(raw ast.Node) ast.Node {
+ if c.err != nil {
+ return raw
+ }
+
+ switch n := raw.(type) {
+ case *ast.Call:
+ c.visitCall(n)
+ case *ast.VariableAccess:
+ c.visitVariableAccess(n)
+ case *ast.Output:
+ // Ignore
+ case *ast.LiteralNode:
+ // Ignore
+ default:
+ // Ignore
+ }
+
+ // We never do replacement with this visitor
+ return raw
+}
+
+func (c *IdentifierCheck) visitCall(n *ast.Call) {
+ // Look up the function in the map
+ function, ok := c.Scope.LookupFunc(n.Func)
+ if !ok {
+ c.createErr(n, fmt.Sprintf("unknown function called: %s", n.Func))
+ return
+ }
+
+ // Break up the args into what is variadic and what is required
+ args := n.Args
+ if function.Variadic && len(args) > len(function.ArgTypes) {
+ args = n.Args[:len(function.ArgTypes)]
+ }
+
+ // Verify the number of arguments
+ if len(args) != len(function.ArgTypes) {
+ c.createErr(n, fmt.Sprintf(
+ "%s: expected %d arguments, got %d",
+ n.Func, len(function.ArgTypes), len(n.Args)))
+ return
+ }
+}
+
+func (c *IdentifierCheck) visitVariableAccess(n *ast.VariableAccess) {
+ // Look up the variable in the map
+ if _, ok := c.Scope.LookupVar(n.Name); !ok {
+ c.createErr(n, fmt.Sprintf(
+ "unknown variable accessed: %s", n.Name))
+ return
+ }
+}
+
+func (c *IdentifierCheck) createErr(n ast.Node, str string) {
+ c.err = fmt.Errorf("%s: %s", n.Pos(), str)
+}
+
+func (c *IdentifierCheck) reset() {
+ c.err = nil
+}
diff --git a/vendor/github.com/hashicorp/hil/check_types.go b/vendor/github.com/hashicorp/hil/check_types.go
new file mode 100644
index 00000000..7a191e87
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/check_types.go
@@ -0,0 +1,662 @@
+package hil
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+// TypeCheck implements ast.Visitor for type checking an AST tree.
+// It requires some configuration to look up the type of nodes.
+//
+// It also optionally will not type error and will insert an implicit
+// type conversions for specific types if specified by the Implicit
+// field. Note that this is kind of organizationally weird to put into
+// this structure but we'd rather do that than duplicate the type checking
+// logic multiple times.
+type TypeCheck struct {
+ Scope ast.Scope
+
+ // Implicit is a map of implicit type conversions that we can do,
+ // and that shouldn't error. The key of the first map is the from type,
+ // the key of the second map is the to type, and the final string
+ // value is the function to call (which must be registered in the Scope).
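+ //
+ // For example, Implicit[ast.TypeString][ast.TypeInt] = "__builtin_StringToInt"
+ // converts a string operand to an int wherever an int is expected.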
+ Implicit map[ast.Type]map[ast.Type]string
+
+ // Stack of types. This shouldn't be used directly except by implementations
+ // of TypeCheckNode.
+ Stack []ast.Type
+
+ err error
+ lock sync.Mutex
+}
+
+// TypeCheckNode is the interface that must be implemented by any
+// ast.Node that wants to support type-checking. If the type checker
+// encounters a node that doesn't implement this, it will error.
+type TypeCheckNode interface {
+ TypeCheck(*TypeCheck) (ast.Node, error)
+}
+
+func (v *TypeCheck) Visit(root ast.Node) error {
+ v.lock.Lock()
+ defer v.lock.Unlock()
+ defer v.reset()
+ root.Accept(v.visit)
+
+ // If we exited early because the result is unknown, that is not an
+ // error; clear it and let the evaluator handle the unknown value.
+ if v.err == errExitUnknown {
+ v.err = nil
+ }
+
+ return v.err
+}
+
+func (v *TypeCheck) visit(raw ast.Node) ast.Node {
+ if v.err != nil {
+ return raw
+ }
+
+ var result ast.Node
+ var err error
+ switch n := raw.(type) {
+ case *ast.Arithmetic:
+ tc := &typeCheckArithmetic{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.Call:
+ tc := &typeCheckCall{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.Conditional:
+ tc := &typeCheckConditional{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.Index:
+ tc := &typeCheckIndex{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.Output:
+ tc := &typeCheckOutput{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.LiteralNode:
+ tc := &typeCheckLiteral{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.VariableAccess:
+ tc := &typeCheckVariableAccess{n}
+ result, err = tc.TypeCheck(v)
+ default:
+ tc, ok := raw.(TypeCheckNode)
+ if !ok {
+ err = fmt.Errorf("unknown node for type check: %#v", raw)
+ break
+ }
+
+ result, err = tc.TypeCheck(v)
+ }
+
+ if err != nil {
+ pos := raw.Pos()
+ v.err = fmt.Errorf("At column %d, line %d: %s",
+ pos.Column, pos.Line, err)
+ }
+
+ return result
+}
+
+type typeCheckArithmetic struct {
+ n *ast.Arithmetic
+}
+
+func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) {
+ // The arguments are on the stack in reverse order, so pop them off.
+ exprs := make([]ast.Type, len(tc.n.Exprs))
+ for i := range tc.n.Exprs {
+ exprs[len(tc.n.Exprs)-1-i] = v.StackPop()
+ }
+
+ // If any operand is unknown then our result is automatically unknown
+ for _, ty := range exprs {
+ if ty == ast.TypeUnknown {
+ v.StackPush(ast.TypeUnknown)
+ return tc.n, nil
+ }
+ }
+
+ switch tc.n.Op {
+ case ast.ArithmeticOpLogicalAnd, ast.ArithmeticOpLogicalOr:
+ return tc.checkLogical(v, exprs)
+ case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual,
+ ast.ArithmeticOpLessThan, ast.ArithmeticOpGreaterThan,
+ ast.ArithmeticOpGreaterThanOrEqual, ast.ArithmeticOpLessThanOrEqual:
+ return tc.checkComparison(v, exprs)
+ default:
+ return tc.checkNumeric(v, exprs)
+ }
+}
+
+func (tc *typeCheckArithmetic) checkNumeric(v *TypeCheck, exprs []ast.Type) (ast.Node, error) {
+ // Determine the resulting type we want. We do this by going over
+ // every expression until we find one with a type we recognize.
+ // We do this because the first expr might be a string ("var.foo")
+ // and we need to know what type to implicitly convert to.
+ mathFunc := "__builtin_IntMath"
+ mathType := ast.TypeInt
+ for _, v := range exprs {
+ // We assume int math but if we find ANY float, the entire
+ // expression turns into floating point math.
+ if v == ast.TypeFloat {
+ mathFunc = "__builtin_FloatMath"
+ mathType = v
+ break
+ }
+ }
+
+ // Verify the args
+ for i, arg := range exprs {
+ if arg != mathType {
+ cn := v.ImplicitConversion(exprs[i], mathType, tc.n.Exprs[i])
+ if cn != nil {
+ tc.n.Exprs[i] = cn
+ continue
+ }
+
+ return nil, fmt.Errorf(
+ "operand %d should be %s, got %s",
+ i+1, mathType, arg)
+ }
+ }
+
+ // Modulo doesn't work for floats
+ if mathType == ast.TypeFloat && tc.n.Op == ast.ArithmeticOpMod {
+ return nil, fmt.Errorf("modulo cannot be used with floats")
+ }
+
+ // Return type
+ v.StackPush(mathType)
+
+ // Replace our node with a call to the proper function. This isn't
+ // type checked but we already verified types.
+ args := make([]ast.Node, len(tc.n.Exprs)+1)
+ args[0] = &ast.LiteralNode{
+ Value: tc.n.Op,
+ Typex: ast.TypeInt,
+ Posx: tc.n.Pos(),
+ }
+ copy(args[1:], tc.n.Exprs)
+ return &ast.Call{
+ Func: mathFunc,
+ Args: args,
+ Posx: tc.n.Pos(),
+ }, nil
+}
+
+func (tc *typeCheckArithmetic) checkComparison(v *TypeCheck, exprs []ast.Type) (ast.Node, error) {
+ if len(exprs) != 2 {
+ // This should never happen, because the parser never produces
+ // nodes that violate this.
+ return nil, fmt.Errorf(
+ "comparison operators must have exactly two operands",
+ )
+ }
+
+ // The first operand always dictates the type for a comparison.
+ compareFunc := ""
+ compareType := exprs[0]
+ switch compareType {
+ case ast.TypeBool:
+ compareFunc = "__builtin_BoolCompare"
+ case ast.TypeFloat:
+ compareFunc = "__builtin_FloatCompare"
+ case ast.TypeInt:
+ compareFunc = "__builtin_IntCompare"
+ case ast.TypeString:
+ compareFunc = "__builtin_StringCompare"
+ default:
+ return nil, fmt.Errorf(
+ "comparison operators apply only to bool, float, int, and string",
+ )
+ }
+
+ // For non-equality comparisons, we will do implicit conversions to
+ // integer types if possible. In this case, we need to go through and
+ // determine the type of comparison we're doing to enable the implicit
+ // conversion.
+ if tc.n.Op != ast.ArithmeticOpEqual && tc.n.Op != ast.ArithmeticOpNotEqual {
+ compareFunc = "__builtin_IntCompare"
+ compareType = ast.TypeInt
+ for _, expr := range exprs {
+ if expr == ast.TypeFloat {
+ compareFunc = "__builtin_FloatCompare"
+ compareType = ast.TypeFloat
+ break
+ }
+ }
+ }
+
+ // Verify (and possibly, convert) the args
+ for i, arg := range exprs {
+ if arg != compareType {
+ cn := v.ImplicitConversion(exprs[i], compareType, tc.n.Exprs[i])
+ if cn != nil {
+ tc.n.Exprs[i] = cn
+ continue
+ }
+
+ return nil, fmt.Errorf(
+ "operand %d should be %s, got %s",
+ i+1, compareType, arg,
+ )
+ }
+ }
+
+ // Only ints and floats can have the <, >, <= and >= operators applied
+ switch tc.n.Op {
+ case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual:
+ // anything goes
+ default:
+ switch compareType {
+ case ast.TypeFloat, ast.TypeInt:
+ // fine
+ default:
+ return nil, fmt.Errorf(
+ "<, >, <= and >= may apply only to int and float values",
+ )
+ }
+ }
+
+ // Comparison operators always return bool
+ v.StackPush(ast.TypeBool)
+
+ // Replace our node with a call to the proper function. This isn't
+ // type checked but we already verified types.
+ args := make([]ast.Node, len(tc.n.Exprs)+1)
+ args[0] = &ast.LiteralNode{
+ Value: tc.n.Op,
+ Typex: ast.TypeInt,
+ Posx: tc.n.Pos(),
+ }
+ copy(args[1:], tc.n.Exprs)
+ return &ast.Call{
+ Func: compareFunc,
+ Args: args,
+ Posx: tc.n.Pos(),
+ }, nil
+}
+
+func (tc *typeCheckArithmetic) checkLogical(v *TypeCheck, exprs []ast.Type) (ast.Node, error) {
+ for i, t := range exprs {
+ if t != ast.TypeBool {
+ cn := v.ImplicitConversion(t, ast.TypeBool, tc.n.Exprs[i])
+ if cn == nil {
+ return nil, fmt.Errorf(
+ "logical operators require boolean operands, not %s",
+ t,
+ )
+ }
+ tc.n.Exprs[i] = cn
+ }
+ }
+
+ // Return type is always boolean
+ v.StackPush(ast.TypeBool)
+
+ // Arithmetic nodes are replaced with a call to a built-in function
+ args := make([]ast.Node, len(tc.n.Exprs)+1)
+ args[0] = &ast.LiteralNode{
+ Value: tc.n.Op,
+ Typex: ast.TypeInt,
+ Posx: tc.n.Pos(),
+ }
+ copy(args[1:], tc.n.Exprs)
+ return &ast.Call{
+ Func: "__builtin_Logical",
+ Args: args,
+ Posx: tc.n.Pos(),
+ }, nil
+}
+
+type typeCheckCall struct {
+ n *ast.Call
+}
+
+func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) {
+ // Look up the function in the map
+ function, ok := v.Scope.LookupFunc(tc.n.Func)
+ if !ok {
+ return nil, fmt.Errorf("unknown function called: %s", tc.n.Func)
+ }
+
+ // The arguments are on the stack in reverse order, so pop them off.
+ args := make([]ast.Type, len(tc.n.Args))
+ for i := range tc.n.Args {
+ args[len(tc.n.Args)-1-i] = v.StackPop()
+ }
+
+ // Verify the args
+ for i, expected := range function.ArgTypes {
+ if expected == ast.TypeAny {
+ continue
+ }
+
+ if args[i] == ast.TypeUnknown {
+ v.StackPush(ast.TypeUnknown)
+ return tc.n, nil
+ }
+
+ if args[i] != expected {
+ cn := v.ImplicitConversion(args[i], expected, tc.n.Args[i])
+ if cn != nil {
+ tc.n.Args[i] = cn
+ continue
+ }
+
+ return nil, fmt.Errorf(
+ "%s: argument %d should be %s, got %s",
+ tc.n.Func, i+1, expected.Printable(), args[i].Printable())
+ }
+ }
+
+ // If we're variadic, then verify the types there
+ if function.Variadic && function.VariadicType != ast.TypeAny {
+ args = args[len(function.ArgTypes):]
+ for i, t := range args {
+ if t == ast.TypeUnknown {
+ v.StackPush(ast.TypeUnknown)
+ return tc.n, nil
+ }
+
+ if t != function.VariadicType {
+ realI := i + len(function.ArgTypes)
+ cn := v.ImplicitConversion(
+ t, function.VariadicType, tc.n.Args[realI])
+ if cn != nil {
+ tc.n.Args[realI] = cn
+ continue
+ }
+
+ return nil, fmt.Errorf(
+ "%s: argument %d should be %s, got %s",
+ tc.n.Func, realI,
+ function.VariadicType.Printable(), t.Printable())
+ }
+ }
+ }
+
+ // Return type
+ v.StackPush(function.ReturnType)
+
+ return tc.n, nil
+}
+
+type typeCheckConditional struct {
+ n *ast.Conditional
+}
+
+func (tc *typeCheckConditional) TypeCheck(v *TypeCheck) (ast.Node, error) {
+ // On the stack we have the types of the condition, true and false
+ // expressions, but they are in reverse order.
+ falseType := v.StackPop()
+ trueType := v.StackPop()
+ condType := v.StackPop()
+
+ if condType == ast.TypeUnknown {
+ v.StackPush(ast.TypeUnknown)
+ return tc.n, nil
+ }
+
+ if condType != ast.TypeBool {
+ cn := v.ImplicitConversion(condType, ast.TypeBool, tc.n.CondExpr)
+ if cn == nil {
+ return nil, fmt.Errorf(
+ "condition must be type bool, not %s", condType.Printable(),
+ )
+ }
+ tc.n.CondExpr = cn
+ }
+
+ // The types of the true and false expression must match
+ if trueType != falseType {
+ // Since passing around stringified versions of other types is
+ // common, we pragmatically allow the false expression to dictate
+ // the result type when the true expression is a string.
+ if trueType == ast.TypeString {
+ cn := v.ImplicitConversion(trueType, falseType, tc.n.TrueExpr)
+ if cn == nil {
+ return nil, fmt.Errorf(
+ "true and false expression types must match; have %s and %s",
+ trueType.Printable(), falseType.Printable(),
+ )
+ }
+ tc.n.TrueExpr = cn
+ trueType = falseType
+ } else {
+ cn := v.ImplicitConversion(falseType, trueType, tc.n.FalseExpr)
+ if cn == nil {
+ return nil, fmt.Errorf(
+ "true and false expression types must match; have %s and %s",
+ trueType.Printable(), falseType.Printable(),
+ )
+ }
+ tc.n.FalseExpr = cn
+ falseType = trueType
+ }
+ }
+
+ // Currently list and map types cannot be used, because we cannot
+ // generally assert that their element types are consistent.
+ // Such support might be added later, either by improving the type
+ // system or restricting usage to only variable and literal expressions,
+ // but for now this is simply prohibited because it doesn't seem to
+ // be a common enough case to be worth the complexity.
+ switch trueType {
+ case ast.TypeList:
+ return nil, fmt.Errorf(
+ "conditional operator cannot be used with list values",
+ )
+ case ast.TypeMap:
+ return nil, fmt.Errorf(
+ "conditional operator cannot be used with map values",
+ )
+ }
+
+ // Result type (guaranteed to also match falseType due to the above)
+ v.StackPush(trueType)
+
+ return tc.n, nil
+}
+
+type typeCheckOutput struct {
+ n *ast.Output
+}
+
+func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) {
+ n := tc.n
+ types := make([]ast.Type, len(n.Exprs))
+ for i := range n.Exprs {
+ types[len(n.Exprs)-1-i] = v.StackPop()
+ }
+
+ for _, ty := range types {
+ if ty == ast.TypeUnknown {
+ v.StackPush(ast.TypeUnknown)
+ return tc.n, nil
+ }
+ }
+
+ // If there is only one argument and it is a list or map, we evaluate to that collection directly
+ if len(types) == 1 {
+ switch t := types[0]; t {
+ case ast.TypeList:
+ fallthrough
+ case ast.TypeMap:
+ v.StackPush(t)
+ return n, nil
+ }
+ }
+
+ // Otherwise, all concat args must be strings, so validate that
+ resultType := ast.TypeString
+ for i, t := range types {
+ if t == ast.TypeUnknown {
+ resultType = ast.TypeUnknown
+ continue
+ }
+
+ if t != ast.TypeString {
+ cn := v.ImplicitConversion(t, ast.TypeString, n.Exprs[i])
+ if cn != nil {
+ n.Exprs[i] = cn
+ continue
+ }
+
+ return nil, fmt.Errorf(
+ "output of an HIL expression must be a string, or a single list (argument %d is %s)", i+1, t)
+ }
+ }
+
+ // This always results in type string, unless there are unknowns
+ v.StackPush(resultType)
+
+ return n, nil
+}
+
+type typeCheckLiteral struct {
+ n *ast.LiteralNode
+}
+
+func (tc *typeCheckLiteral) TypeCheck(v *TypeCheck) (ast.Node, error) {
+ v.StackPush(tc.n.Typex)
+ return tc.n, nil
+}
+
+type typeCheckVariableAccess struct {
+ n *ast.VariableAccess
+}
+
+func (tc *typeCheckVariableAccess) TypeCheck(v *TypeCheck) (ast.Node, error) {
+ // Look up the variable in the map
+ variable, ok := v.Scope.LookupVar(tc.n.Name)
+ if !ok {
+ return nil, fmt.Errorf(
+ "unknown variable accessed: %s", tc.n.Name)
+ }
+
+ // Add the type to the stack
+ v.StackPush(variable.Type)
+
+ return tc.n, nil
+}
+
+type typeCheckIndex struct {
+ n *ast.Index
+}
+
+func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) {
+ keyType := v.StackPop()
+ targetType := v.StackPop()
+
+ if keyType == ast.TypeUnknown || targetType == ast.TypeUnknown {
+ v.StackPush(ast.TypeUnknown)
+ return tc.n, nil
+ }
+
+ // Ensure we have a VariableAccess as the target
+ varAccessNode, ok := tc.n.Target.(*ast.VariableAccess)
+ if !ok {
+ return nil, fmt.Errorf(
+ "target of an index must be a VariableAccess node, was %T", tc.n.Target)
+ }
+
+ // Get the variable
+ variable, ok := v.Scope.LookupVar(varAccessNode.Name)
+ if !ok {
+ return nil, fmt.Errorf(
+ "unknown variable accessed: %s", varAccessNode.Name)
+ }
+
+ switch targetType {
+ case ast.TypeList:
+ if keyType != ast.TypeInt {
+ tc.n.Key = v.ImplicitConversion(keyType, ast.TypeInt, tc.n.Key)
+ if tc.n.Key == nil {
+ return nil, fmt.Errorf(
+ "key of an index must be an int, was %s", keyType)
+ }
+ }
+
+ valType, err := ast.VariableListElementTypesAreHomogenous(
+ varAccessNode.Name, variable.Value.([]ast.Variable))
+ if err != nil {
+ return tc.n, err
+ }
+
+ v.StackPush(valType)
+ return tc.n, nil
+ case ast.TypeMap:
+ if keyType != ast.TypeString {
+ tc.n.Key = v.ImplicitConversion(keyType, ast.TypeString, tc.n.Key)
+ if tc.n.Key == nil {
+ return nil, fmt.Errorf(
+ "key of an index must be a string, was %s", keyType)
+ }
+ }
+
+ valType, err := ast.VariableMapValueTypesAreHomogenous(
+ varAccessNode.Name, variable.Value.(map[string]ast.Variable))
+ if err != nil {
+ return tc.n, err
+ }
+
+ v.StackPush(valType)
+ return tc.n, nil
+ default:
+ return nil, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type)
+ }
+}
+
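+// ImplicitConversion wraps the given node in a call to the conversion
+// function registered in the Implicit table for the actual-to-expected type
+// pair, or returns nil if no such conversion is available.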
+func (v *TypeCheck) ImplicitConversion(
+ actual ast.Type, expected ast.Type, n ast.Node) ast.Node {
+ if v.Implicit == nil {
+ return nil
+ }
+
+ fromMap, ok := v.Implicit[actual]
+ if !ok {
+ return nil
+ }
+
+ toFunc, ok := fromMap[expected]
+ if !ok {
+ return nil
+ }
+
+ return &ast.Call{
+ Func: toFunc,
+ Args: []ast.Node{n},
+ Posx: n.Pos(),
+ }
+}
+
+func (v *TypeCheck) reset() {
+ v.Stack = nil
+ v.err = nil
+}
+
+func (v *TypeCheck) StackPush(t ast.Type) {
+ v.Stack = append(v.Stack, t)
+}
+
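+// StackPop removes and returns the type on top of the stack. It panics if
+// the stack is empty; StackPeek below is the non-destructive variant that
+// returns ast.TypeInvalid instead.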
+func (v *TypeCheck) StackPop() ast.Type {
+ var x ast.Type
+ x, v.Stack = v.Stack[len(v.Stack)-1], v.Stack[:len(v.Stack)-1]
+ return x
+}
+
+func (v *TypeCheck) StackPeek() ast.Type {
+ if len(v.Stack) == 0 {
+ return ast.TypeInvalid
+ }
+
+ return v.Stack[len(v.Stack)-1]
+}
diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go
new file mode 100644
index 00000000..f2024d01
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/convert.go
@@ -0,0 +1,159 @@
+package hil
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/hashicorp/hil/ast"
+ "github.com/mitchellh/mapstructure"
+)
+
+// UnknownValue is a sentinel value that can be used to denote
+// that a value of a variable (or map element, list element, etc.)
+// is unknown. This will always have the type ast.TypeUnknown.
+const UnknownValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
+
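+// These variables exist only so the DecodeHook below can cheaply obtain the
+// reflect.Type values for []interface{}, []string and map[string]interface{}.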
+var hilMapstructureDecodeHookSlice []interface{}
+var hilMapstructureDecodeHookStringSlice []string
+var hilMapstructureDecodeHookMap map[string]interface{}
+
+// hilMapstructureWeakDecode behaves in the same way as mapstructure.WeakDecode,
+// but installs a DecodeHook that defeats mapstructure's backward-compatibility
+// mode, which would otherwise weakly decode an empty []interface{} into an
+// empty map[string]interface{}. This keeps WeakDecode's desirable weak typing
+// while refusing to silently convert lists into maps.
+func hilMapstructureWeakDecode(m interface{}, rawVal interface{}) error {
+ config := &mapstructure.DecoderConfig{
+ DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) {
+ sliceType := reflect.TypeOf(hilMapstructureDecodeHookSlice)
+ stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice)
+ mapType := reflect.TypeOf(hilMapstructureDecodeHookMap)
+
+ if (source == sliceType || source == stringSliceType) && target == mapType {
+ return nil, fmt.Errorf("Cannot convert %s into a %s", source, target)
+ }
+
+ return val, nil
+ },
+ WeaklyTypedInput: true,
+ Result: rawVal,
+ }
+
+ decoder, err := mapstructure.NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(m)
+}
+
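+// InterfaceToVariable converts an arbitrary Go value into an ast.Variable,
+// trying string, map and list representations in that order. Values that are
+// already ast.Variable are passed through unchanged, and the UnknownValue
+// sentinel string becomes a variable of type ast.TypeUnknown.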
+func InterfaceToVariable(input interface{}) (ast.Variable, error) {
+ if inputVariable, ok := input.(ast.Variable); ok {
+ return inputVariable, nil
+ }
+
+ var stringVal string
+ if err := hilMapstructureWeakDecode(input, &stringVal); err == nil {
+ // Special case the unknown value to turn into "unknown"
+ if stringVal == UnknownValue {
+ return ast.Variable{Value: UnknownValue, Type: ast.TypeUnknown}, nil
+ }
+
+ // Otherwise return the string value
+ return ast.Variable{
+ Type: ast.TypeString,
+ Value: stringVal,
+ }, nil
+ }
+
+ var mapVal map[string]interface{}
+ if err := hilMapstructureWeakDecode(input, &mapVal); err == nil {
+ elements := make(map[string]ast.Variable)
+ for i, element := range mapVal {
+ varElement, err := InterfaceToVariable(element)
+ if err != nil {
+ return ast.Variable{}, err
+ }
+ elements[i] = varElement
+ }
+
+ return ast.Variable{
+ Type: ast.TypeMap,
+ Value: elements,
+ }, nil
+ }
+
+ var sliceVal []interface{}
+ if err := hilMapstructureWeakDecode(input, &sliceVal); err == nil {
+ elements := make([]ast.Variable, len(sliceVal))
+ for i, element := range sliceVal {
+ varElement, err := InterfaceToVariable(element)
+ if err != nil {
+ return ast.Variable{}, err
+ }
+ elements[i] = varElement
+ }
+
+ return ast.Variable{
+ Type: ast.TypeList,
+ Value: elements,
+ }, nil
+ }
+
+ return ast.Variable{}, fmt.Errorf("value for conversion must be a string, interface{} or map[string]interface: got %T", input)
+}
+
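+// VariableToInterface is the inverse of InterfaceToVariable: it unwraps an
+// ast.Variable of type string, list or map into the corresponding native Go
+// value, recursing into collection elements.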
+func VariableToInterface(input ast.Variable) (interface{}, error) {
+ if input.Type == ast.TypeString {
+ if inputStr, ok := input.Value.(string); ok {
+ return inputStr, nil
+ } else {
+ return nil, fmt.Errorf("ast.Variable with type string has value which is not a string")
+ }
+ }
+
+ if input.Type == ast.TypeList {
+ inputList, ok := input.Value.([]ast.Variable)
+ if !ok {
+ return nil, fmt.Errorf("ast.Variable with type list has value which is not a []ast.Variable")
+ }
+
+ result := make([]interface{}, 0)
+ if len(inputList) == 0 {
+ return result, nil
+ }
+
+ for _, element := range inputList {
+ if convertedElement, err := VariableToInterface(element); err == nil {
+ result = append(result, convertedElement)
+ } else {
+ return nil, err
+ }
+ }
+
+ return result, nil
+ }
+
+ if input.Type == ast.TypeMap {
+ inputMap, ok := input.Value.(map[string]ast.Variable)
+ if !ok {
+ return nil, fmt.Errorf("ast.Variable with type map has value which is not a map[string]ast.Variable")
+ }
+
+ result := make(map[string]interface{})
+ if len(inputMap) == 0 {
+ return result, nil
+ }
+
+ for key, value := range inputMap {
+ if convertedValue, err := VariableToInterface(value); err == nil {
+ result[key] = convertedValue
+ } else {
+ return nil, err
+ }
+ }
+
+ return result, nil
+ }
+
+ return nil, fmt.Errorf("unknown input type: %s", input.Type)
+}
diff --git a/vendor/github.com/hashicorp/hil/eval.go b/vendor/github.com/hashicorp/hil/eval.go
new file mode 100644
index 00000000..27820769
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/eval.go
@@ -0,0 +1,472 @@
+package hil
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+// EvalConfig is the configuration for evaluating.
+type EvalConfig struct {
+ // GlobalScope is the global scope of execution for evaluation.
+ GlobalScope *ast.BasicScope
+
+ // SemanticChecks is a list of additional semantic checks that will be run
+ // on the tree prior to evaluating it. The type checker, identifier checker,
+ // etc. will be run before these automatically.
+ SemanticChecks []SemanticChecker
+}
+
+// SemanticChecker is the type that must be implemented to do a
+// semantic check on an AST tree. This will be called with the root node.
+type SemanticChecker func(ast.Node) error
+
+// EvaluationResult is a struct returned from the hil.Eval function,
+// representing the result of an interpolation. Results are returned in their
+// "natural" Go structure rather than in terms of the HIL AST. For the types
+// currently implemented, this means that the Value field can be interpreted as
+// the following Go types:
+// TypeInvalid: undefined
+// TypeString: string
+// TypeList: []interface{}
+// TypeMap: map[string]interface{}
+// TypeBool: bool
+// TypeUnknown: the UnknownValue sentinel string
+type EvaluationResult struct {
+ Type EvalType
+ Value interface{}
+}
+
+// InvalidResult is a structure representing the result of a HIL interpolation
+// which has invalid syntax, missing variables, or some other type of error.
+// The error is described out of band in the accompanying error return value.
+var InvalidResult = EvaluationResult{Type: TypeInvalid, Value: nil}
+
+// errExitUnknown is an internal error that when returned means the result
+// is an unknown value. We use this for early exit.
+var errExitUnknown = errors.New("unknown value")
+
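+// Eval runs the semantic checks over the given AST and then evaluates it,
+// returning the result converted to its natural Go representation as
+// described on EvaluationResult.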
+func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) {
+ output, outputType, err := internalEval(root, config)
+ if err != nil {
+ return InvalidResult, err
+ }
+
+ // If the result contains any nested unknowns then the result as a whole
+ // is unknown, so that callers only have to deal with "entirely known"
+ // or "entirely unknown" as outcomes.
+ if ast.IsUnknown(ast.Variable{Type: outputType, Value: output}) {
+ outputType = ast.TypeUnknown
+ output = UnknownValue
+ }
+
+ switch outputType {
+ case ast.TypeList:
+ val, err := VariableToInterface(ast.Variable{
+ Type: ast.TypeList,
+ Value: output,
+ })
+ return EvaluationResult{
+ Type: TypeList,
+ Value: val,
+ }, err
+ case ast.TypeMap:
+ val, err := VariableToInterface(ast.Variable{
+ Type: ast.TypeMap,
+ Value: output,
+ })
+ return EvaluationResult{
+ Type: TypeMap,
+ Value: val,
+ }, err
+ case ast.TypeString:
+ return EvaluationResult{
+ Type: TypeString,
+ Value: output,
+ }, nil
+ case ast.TypeBool:
+ return EvaluationResult{
+ Type: TypeBool,
+ Value: output,
+ }, nil
+ case ast.TypeUnknown:
+ return EvaluationResult{
+ Type: TypeUnknown,
+ Value: UnknownValue,
+ }, nil
+ default:
+ return InvalidResult, fmt.Errorf("unknown type %s as interpolation output", outputType)
+ }
+}
+
+// internalEval evaluates the given AST tree and returns its output value, the
+// type of the output, and any error that occurred.
+func internalEval(root ast.Node, config *EvalConfig) (interface{}, ast.Type, error) {
+ // Copy the scope so we can add our builtins
+ if config == nil {
+ config = new(EvalConfig)
+ }
+ scope := registerBuiltins(config.GlobalScope)
+ implicitMap := map[ast.Type]map[ast.Type]string{
+ ast.TypeFloat: {
+ ast.TypeInt: "__builtin_FloatToInt",
+ ast.TypeString: "__builtin_FloatToString",
+ },
+ ast.TypeInt: {
+ ast.TypeFloat: "__builtin_IntToFloat",
+ ast.TypeString: "__builtin_IntToString",
+ },
+ ast.TypeString: {
+ ast.TypeInt: "__builtin_StringToInt",
+ ast.TypeFloat: "__builtin_StringToFloat",
+ ast.TypeBool: "__builtin_StringToBool",
+ },
+ ast.TypeBool: {
+ ast.TypeString: "__builtin_BoolToString",
+ },
+ }
+
+ // Build our own semantic checks that we always run
+ tv := &TypeCheck{Scope: scope, Implicit: implicitMap}
+ ic := &IdentifierCheck{Scope: scope}
+
+ // Build up the semantic checks for execution
+ checks := make(
+ []SemanticChecker,
+ len(config.SemanticChecks),
+ len(config.SemanticChecks)+2)
+ copy(checks, config.SemanticChecks)
+ checks = append(checks, ic.Visit)
+ checks = append(checks, tv.Visit)
+
+ // Run the semantic checks
+ for _, check := range checks {
+ if err := check(root); err != nil {
+ return nil, ast.TypeInvalid, err
+ }
+ }
+
+ // Execute
+ v := &evalVisitor{Scope: scope}
+ return v.Visit(root)
+}
+
+// EvalNode is the interface that must be implemented by any ast.Node
+// to support evaluation. This will be called in visitor pattern order.
+// The result of each call to Eval is automatically pushed onto the
+// stack as a LiteralNode. Pop elements off the stack to get child
+// values.
+type EvalNode interface {
+ Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error)
+}
+
+type evalVisitor struct {
+ Scope ast.Scope
+ Stack ast.Stack
+
+ err error
+ lock sync.Mutex
+}
+
+func (v *evalVisitor) Visit(root ast.Node) (interface{}, ast.Type, error) {
+ // Run the actual visitor pattern
+ root.Accept(v.visit)
+
+ // Get our result and clear out everything else
+ var result *ast.LiteralNode
+ if v.Stack.Len() > 0 {
+ result = v.Stack.Pop().(*ast.LiteralNode)
+ } else {
+ result = new(ast.LiteralNode)
+ }
+ resultErr := v.err
+ if resultErr == errExitUnknown {
+ // This means the return value is unknown and we used the error
+ // as an early exit mechanism. Reset since the value on the stack
+ // should be the unknown value.
+ resultErr = nil
+ }
+
+ // Clear everything else so we aren't just dangling
+ v.Stack.Reset()
+ v.err = nil
+
+ t, err := result.Type(v.Scope)
+ if err != nil {
+ return nil, ast.TypeInvalid, err
+ }
+
+ return result.Value, t, resultErr
+}
+
+func (v *evalVisitor) visit(raw ast.Node) ast.Node {
+ if v.err != nil {
+ return raw
+ }
+
+ en, err := evalNode(raw)
+ if err != nil {
+ v.err = err
+ return raw
+ }
+
+ out, outType, err := en.Eval(v.Scope, &v.Stack)
+ if err != nil {
+ v.err = err
+ return raw
+ }
+
+ v.Stack.Push(&ast.LiteralNode{
+ Value: out,
+ Typex: outType,
+ })
+
+ if outType == ast.TypeUnknown {
+ // Halt immediately
+ v.err = errExitUnknown
+ return raw
+ }
+
+ return raw
+}
+
+// evalNode is a private function that returns an EvalNode for built-in
+// types as well as any other EvalNode implementations.
+func evalNode(raw ast.Node) (EvalNode, error) {
+ switch n := raw.(type) {
+ case *ast.Index:
+ return &evalIndex{n}, nil
+ case *ast.Call:
+ return &evalCall{n}, nil
+ case *ast.Conditional:
+ return &evalConditional{n}, nil
+ case *ast.Output:
+ return &evalOutput{n}, nil
+ case *ast.LiteralNode:
+ return &evalLiteralNode{n}, nil
+ case *ast.VariableAccess:
+ return &evalVariableAccess{n}, nil
+ default:
+ en, ok := n.(EvalNode)
+ if !ok {
+ return nil, fmt.Errorf("node doesn't support evaluation: %#v", raw)
+ }
+
+ return en, nil
+ }
+}
+
+type evalCall struct{ *ast.Call }
+
+func (v *evalCall) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+ // Look up the function in the map
+ function, ok := s.LookupFunc(v.Func)
+ if !ok {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "unknown function called: %s", v.Func)
+ }
+
+ // The arguments are on the stack in reverse order, so pop them off.
+ args := make([]interface{}, len(v.Args))
+ for i := range v.Args {
+ node := stack.Pop().(*ast.LiteralNode)
+ if node.IsUnknown() {
+ // If any arguments are unknown then the result is automatically unknown
+ return UnknownValue, ast.TypeUnknown, nil
+ }
+ args[len(v.Args)-1-i] = node.Value
+ }
+
+ // Call the function
+ result, err := function.Callback(args)
+ if err != nil {
+ return nil, ast.TypeInvalid, fmt.Errorf("%s: %s", v.Func, err)
+ }
+
+ return result, function.ReturnType, nil
+}
+
+type evalConditional struct{ *ast.Conditional }
+
+func (v *evalConditional) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+ // On the stack we have literal nodes representing the resulting values
+ // of the condition, true and false expressions, but they are in reverse
+ // order.
+ falseLit := stack.Pop().(*ast.LiteralNode)
+ trueLit := stack.Pop().(*ast.LiteralNode)
+ condLit := stack.Pop().(*ast.LiteralNode)
+
+ if condLit.IsUnknown() {
+ // If our conditional is unknown then our result is also unknown
+ return UnknownValue, ast.TypeUnknown, nil
+ }
+
+ if condLit.Value.(bool) {
+ return trueLit.Value, trueLit.Typex, nil
+ } else {
+ return falseLit.Value, falseLit.Typex, nil
+ }
+}
+
+type evalIndex struct{ *ast.Index }
+
+func (v *evalIndex) Eval(scope ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+ key := stack.Pop().(*ast.LiteralNode)
+ target := stack.Pop().(*ast.LiteralNode)
+
+ variableName := v.Index.Target.(*ast.VariableAccess).Name
+
+ if key.IsUnknown() {
+ // If our key is unknown then our result is also unknown
+ return UnknownValue, ast.TypeUnknown, nil
+ }
+
+ // For target, we'll accept collections containing unknown values but
+ // we still need to catch when the collection itself is unknown, shallowly.
+ if target.Typex == ast.TypeUnknown {
+ return UnknownValue, ast.TypeUnknown, nil
+ }
+
+ switch target.Typex {
+ case ast.TypeList:
+ return v.evalListIndex(variableName, target.Value, key.Value)
+ case ast.TypeMap:
+ return v.evalMapIndex(variableName, target.Value, key.Value)
+ default:
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "target %q for indexing must be ast.TypeList or ast.TypeMap, is %s",
+ variableName, target.Typex)
+ }
+}
+
+func (v *evalIndex) evalListIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) {
+ // We assume type checking was already done and we can assume that target
+ // is a list and key is an int
+ list, ok := target.([]ast.Variable)
+ if !ok {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "cannot cast target to []Variable, is: %T", target)
+ }
+
+ keyInt, ok := key.(int)
+ if !ok {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "cannot cast key to int, is: %T", key)
+ }
+
+ if len(list) == 0 {
+ return nil, ast.TypeInvalid, fmt.Errorf("list is empty")
+ }
+
+ if keyInt < 0 || len(list) < keyInt+1 {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "index %d out of range for list %s (max %d)",
+ keyInt, variableName, len(list))
+ }
+
+ returnVal := list[keyInt].Value
+ returnType := list[keyInt].Type
+ return returnVal, returnType, nil
+}
+
+func (v *evalIndex) evalMapIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) {
+ // We assume type checking was already done and we can assume that target
+ // is a map and key is a string
+ vmap, ok := target.(map[string]ast.Variable)
+ if !ok {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "cannot cast target to map[string]Variable, is: %T", target)
+ }
+
+ keyString, ok := key.(string)
+ if !ok {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "cannot cast key to string, is: %T", key)
+ }
+
+ if len(vmap) == 0 {
+ return nil, ast.TypeInvalid, fmt.Errorf("map is empty")
+ }
+
+ value, ok := vmap[keyString]
+ if !ok {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "key %q does not exist in map %s", keyString, variableName)
+ }
+
+ return value.Value, value.Type, nil
+}
+
+type evalOutput struct{ *ast.Output }
+
+func (v *evalOutput) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+ // The expressions should all be on the stack in reverse
+ // order. So pop them off, reverse their order, and concatenate.
+ nodes := make([]*ast.LiteralNode, 0, len(v.Exprs))
+ haveUnknown := false
+ for range v.Exprs {
+ n := stack.Pop().(*ast.LiteralNode)
+ nodes = append(nodes, n)
+
+ // If we have any unknowns then the whole result is unknown
+ // (we must deal with this first, because the type checker can
+ // skip type conversions in the presence of unknowns, and thus
+ // any of our other nodes may be incorrectly typed.)
+ if n.IsUnknown() {
+ haveUnknown = true
+ }
+ }
+
+ if haveUnknown {
+ return UnknownValue, ast.TypeUnknown, nil
+ }
+
+ // Special case the single list and map
+ if len(nodes) == 1 {
+ switch t := nodes[0].Typex; t {
+ case ast.TypeList:
+ fallthrough
+ case ast.TypeMap:
+ fallthrough
+ case ast.TypeUnknown:
+ return nodes[0].Value, t, nil
+ }
+ }
+
+ // Otherwise concatenate the strings
+ var buf bytes.Buffer
+ for i := len(nodes) - 1; i >= 0; i-- {
+ if nodes[i].Typex != ast.TypeString {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "invalid output with %s value at index %d: %#v",
+ nodes[i].Typex,
+ i,
+ nodes[i].Value,
+ )
+ }
+ buf.WriteString(nodes[i].Value.(string))
+ }
+
+ return buf.String(), ast.TypeString, nil
+}
+
+type evalLiteralNode struct{ *ast.LiteralNode }
+
+func (v *evalLiteralNode) Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) {
+ return v.Value, v.Typex, nil
+}
+
+type evalVariableAccess struct{ *ast.VariableAccess }
+
+func (v *evalVariableAccess) Eval(scope ast.Scope, _ *ast.Stack) (interface{}, ast.Type, error) {
+ // Look up the variable in the map
+ variable, ok := scope.LookupVar(v.Name)
+ if !ok {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "unknown variable accessed: %s", v.Name)
+ }
+
+ return variable.Value, variable.Type, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/eval_type.go b/vendor/github.com/hashicorp/hil/eval_type.go
new file mode 100644
index 00000000..6946ecd2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/eval_type.go
@@ -0,0 +1,16 @@
+package hil
+
+//go:generate stringer -type=EvalType eval_type.go
+
+// EvalType represents the type of the output returned from a HIL
+// evaluation.
+type EvalType uint32
+
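+// The values are distinct bit flags: TypeString is 2, TypeBool 4, TypeList 8,
+// TypeMap 16 and TypeUnknown 32, matching the generated String method in
+// evaltype_string.go.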
+const (
+ TypeInvalid EvalType = 0
+ TypeString EvalType = 1 << iota
+ TypeBool
+ TypeList
+ TypeMap
+ TypeUnknown
+)
diff --git a/vendor/github.com/hashicorp/hil/evaltype_string.go b/vendor/github.com/hashicorp/hil/evaltype_string.go
new file mode 100644
index 00000000..b107ddd4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/evaltype_string.go
@@ -0,0 +1,42 @@
+// Code generated by "stringer -type=EvalType eval_type.go"; DO NOT EDIT
+
+package hil
+
+import "fmt"
+
+const (
+ _EvalType_name_0 = "TypeInvalid"
+ _EvalType_name_1 = "TypeString"
+ _EvalType_name_2 = "TypeBool"
+ _EvalType_name_3 = "TypeList"
+ _EvalType_name_4 = "TypeMap"
+ _EvalType_name_5 = "TypeUnknown"
+)
+
+var (
+ _EvalType_index_0 = [...]uint8{0, 11}
+ _EvalType_index_1 = [...]uint8{0, 10}
+ _EvalType_index_2 = [...]uint8{0, 8}
+ _EvalType_index_3 = [...]uint8{0, 8}
+ _EvalType_index_4 = [...]uint8{0, 7}
+ _EvalType_index_5 = [...]uint8{0, 11}
+)
+
+func (i EvalType) String() string {
+ switch {
+ case i == 0:
+ return _EvalType_name_0
+ case i == 2:
+ return _EvalType_name_1
+ case i == 4:
+ return _EvalType_name_2
+ case i == 8:
+ return _EvalType_name_3
+ case i == 16:
+ return _EvalType_name_4
+ case i == 32:
+ return _EvalType_name_5
+ default:
+ return fmt.Sprintf("EvalType(%d)", i)
+ }
+}
diff --git a/vendor/github.com/hashicorp/hil/parse.go b/vendor/github.com/hashicorp/hil/parse.go
new file mode 100644
index 00000000..ecbe1fdb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/parse.go
@@ -0,0 +1,29 @@
+package hil
+
+import (
+ "github.com/hashicorp/hil/ast"
+ "github.com/hashicorp/hil/parser"
+ "github.com/hashicorp/hil/scanner"
+)
+
+// Parse parses the given program and returns an executable AST tree.
+//
+// Syntax errors are returned as an error with the dynamic type
+// *parser.ParseError, which gives the caller access to the source position
+// where the error was found. This allows, for example, combining the position
+// with a known source filename to add context to the error message.
+func Parse(v string) (ast.Node, error) {
+ return ParseWithPosition(v, ast.Pos{Line: 1, Column: 1})
+}
+
+// ParseWithPosition is like Parse except that it overrides the source
+// row and column position of the first character in the string, which should
+// be 1-based.
+//
+// This can be used when HIL is embedded in another language and the outer
+// parser knows the row and column where the HIL expression started within
+// the overall source file.
+func ParseWithPosition(v string, pos ast.Pos) (ast.Node, error) {
+ ch := scanner.Scan(v, pos)
+ return parser.Parse(ch)
+}
diff --git a/vendor/github.com/hashicorp/hil/parser/binary_op.go b/vendor/github.com/hashicorp/hil/parser/binary_op.go
new file mode 100644
index 00000000..2e013e01
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/parser/binary_op.go
@@ -0,0 +1,45 @@
+package parser
+
+import (
+ "github.com/hashicorp/hil/ast"
+ "github.com/hashicorp/hil/scanner"
+)
+
+var binaryOps []map[scanner.TokenType]ast.ArithmeticOp
+
+func init() {
+ // This operation table maps from the operator's scanner token type
+ // to the AST arithmetic operation. All expressions produced from
+ // binary operators are *ast.Arithmetic nodes.
+ //
+ // Binary operator groups are listed in order of precedence, with
+ // the *lowest* precedence first. Operators within the same group
+ // have left-to-right associativity.
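+ //
+ // For example, under this table "a || b == c + d * e" parses as
+ // "a || (b == (c + (d * e)))".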
+ binaryOps = []map[scanner.TokenType]ast.ArithmeticOp{
+ {
+ scanner.OR: ast.ArithmeticOpLogicalOr,
+ },
+ {
+ scanner.AND: ast.ArithmeticOpLogicalAnd,
+ },
+ {
+ scanner.EQUAL: ast.ArithmeticOpEqual,
+ scanner.NOTEQUAL: ast.ArithmeticOpNotEqual,
+ },
+ {
+ scanner.GT: ast.ArithmeticOpGreaterThan,
+ scanner.GTE: ast.ArithmeticOpGreaterThanOrEqual,
+ scanner.LT: ast.ArithmeticOpLessThan,
+ scanner.LTE: ast.ArithmeticOpLessThanOrEqual,
+ },
+ {
+ scanner.PLUS: ast.ArithmeticOpAdd,
+ scanner.MINUS: ast.ArithmeticOpSub,
+ },
+ {
+ scanner.STAR: ast.ArithmeticOpMul,
+ scanner.SLASH: ast.ArithmeticOpDiv,
+ scanner.PERCENT: ast.ArithmeticOpMod,
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/hil/parser/error.go b/vendor/github.com/hashicorp/hil/parser/error.go
new file mode 100644
index 00000000..bacd6964
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/parser/error.go
@@ -0,0 +1,38 @@
+package parser
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hil/ast"
+ "github.com/hashicorp/hil/scanner"
+)
+
+type ParseError struct {
+ Message string
+ Pos ast.Pos
+}
+
+func Errorf(pos ast.Pos, format string, args ...interface{}) error {
+ return &ParseError{
+ Message: fmt.Sprintf(format, args...),
+ Pos: pos,
+ }
+}
+
+// TokenErrorf is a convenient wrapper around Errorf that uses the
+// position of the given token.
+func TokenErrorf(token *scanner.Token, format string, args ...interface{}) error {
+ return Errorf(token.Pos, format, args...)
+}
+
+func ExpectationError(wanted string, got *scanner.Token) error {
+ return TokenErrorf(got, "expected %s but found %s", wanted, got)
+}
+
+func (e *ParseError) Error() string {
+ return fmt.Sprintf("parse error at %s: %s", e.Pos, e.Message)
+}
+
+func (e *ParseError) String() string {
+ return e.Error()
+}
diff --git a/vendor/github.com/hashicorp/hil/parser/fuzz.go b/vendor/github.com/hashicorp/hil/parser/fuzz.go
new file mode 100644
index 00000000..de954f38
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/parser/fuzz.go
@@ -0,0 +1,28 @@
+// +build gofuzz
+
+package parser
+
+import (
+ "github.com/hashicorp/hil/ast"
+ "github.com/hashicorp/hil/scanner"
+)
+
+// This is a fuzz testing function designed to be used with go-fuzz:
+// https://github.com/dvyukov/go-fuzz
+//
+// It's not included in a normal build due to the gofuzz build tag above.
+//
+// There are some input files that you can use as a seed corpus for go-fuzz
+// in the directory ./fuzz-corpus .
+
+func Fuzz(data []byte) int {
+ str := string(data)
+
+ ch := scanner.Scan(str, ast.Pos{Line: 1, Column: 1})
+ _, err := Parse(ch)
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/hashicorp/hil/parser/parser.go b/vendor/github.com/hashicorp/hil/parser/parser.go
new file mode 100644
index 00000000..376f1c49
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/parser/parser.go
@@ -0,0 +1,522 @@
+package parser
+
+import (
+ "strconv"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hil/ast"
+ "github.com/hashicorp/hil/scanner"
+)
+
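+// Parse consumes the tokens from the given channel and returns the root node
+// of the resulting AST along with any parse error.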
+func Parse(ch <-chan *scanner.Token) (ast.Node, error) {
+ peeker := scanner.NewPeeker(ch)
+ parser := &parser{peeker}
+ output, err := parser.ParseTopLevel()
+ peeker.Close()
+ return output, err
+}
+
+type parser struct {
+ peeker *scanner.Peeker
+}
+
+func (p *parser) ParseTopLevel() (ast.Node, error) {
+ return p.parseInterpolationSeq(false)
+}
+
+func (p *parser) ParseQuoted() (ast.Node, error) {
+ return p.parseInterpolationSeq(true)
+}
+
+// parseInterpolationSeq parses either the top-level sequence of literals
+// and interpolation expressions or a similar sequence within a quoted
+// string inside an interpolation expression. The latter case is requested
+// by setting 'quoted' to true.
+func (p *parser) parseInterpolationSeq(quoted bool) (ast.Node, error) {
+ literalType := scanner.LITERAL
+ endType := scanner.EOF
+ if quoted {
+ // exceptions for quoted sequences
+ literalType = scanner.STRING
+ endType = scanner.CQUOTE
+ }
+
+ startPos := p.peeker.Peek().Pos
+
+ if quoted {
+ tok := p.peeker.Read()
+ if tok.Type != scanner.OQUOTE {
+ return nil, ExpectationError("open quote", tok)
+ }
+ }
+
+ var exprs []ast.Node
+ for {
+ tok := p.peeker.Read()
+
+ if tok.Type == endType {
+ break
+ }
+
+ switch tok.Type {
+ case literalType:
+ val, err := p.parseStringToken(tok)
+ if err != nil {
+ return nil, err
+ }
+ exprs = append(exprs, &ast.LiteralNode{
+ Value: val,
+ Typex: ast.TypeString,
+ Posx: tok.Pos,
+ })
+ case scanner.BEGIN:
+ expr, err := p.ParseInterpolation()
+ if err != nil {
+ return nil, err
+ }
+ exprs = append(exprs, expr)
+ default:
+ return nil, ExpectationError(`"${"`, tok)
+ }
+ }
+
+ if len(exprs) == 0 {
+ // If we have no parts at all then the input must've
+ // been an empty string.
+ exprs = append(exprs, &ast.LiteralNode{
+ Value: "",
+ Typex: ast.TypeString,
+ Posx: startPos,
+ })
+ }
+
+ // As a special case, if our "Output" contains only one expression
+ // and it's a literal string then we'll hoist it up to be our
+ // direct return value, so callers can easily recognize a string
+ // that has no interpolations at all.
+ if len(exprs) == 1 {
+ if lit, ok := exprs[0].(*ast.LiteralNode); ok {
+ if lit.Typex == ast.TypeString {
+ return lit, nil
+ }
+ }
+ }
+
+ return &ast.Output{
+ Exprs: exprs,
+ Posx: startPos,
+ }, nil
+}
+
+// parseStringToken takes a token of either LITERAL or STRING type and
+// returns the interpreted string, after processing any relevant
+// escape sequences.
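+//
+// For example, "$$" collapses to a single "$" in both token types, while the
+// backslash escapes \\, \n and \" are processed only in STRING tokens.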
+func (p *parser) parseStringToken(tok *scanner.Token) (string, error) {
+ var backslashes bool
+ switch tok.Type {
+ case scanner.LITERAL:
+ backslashes = false
+ case scanner.STRING:
+ backslashes = true
+ default:
+ panic("unsupported string token type")
+ }
+
+ raw := []byte(tok.Content)
+ buf := make([]byte, 0, len(raw))
+
+ for i := 0; i < len(raw); i++ {
+ b := raw[i]
+ more := len(raw) > (i + 1)
+
+ if b == '$' {
+ if more && raw[i+1] == '$' {
+ // skip over the second dollar sign
+ i++
+ }
+ } else if backslashes && b == '\\' {
+ if !more {
+ return "", Errorf(
+ ast.Pos{
+ Column: tok.Pos.Column + utf8.RuneCount(raw[:i]),
+ Line: tok.Pos.Line,
+ },
+ `unfinished backslash escape sequence`,
+ )
+ }
+ escapeType := raw[i+1]
+ switch escapeType {
+ case '\\':
+ // skip over the second slash
+ i++
+ case 'n':
+ b = '\n'
+ i++
+ case '"':
+ b = '"'
+ i++
+ default:
+ return "", Errorf(
+ ast.Pos{
+ Column: tok.Pos.Column + utf8.RuneCount(raw[:i]),
+ Line: tok.Pos.Line,
+ },
+ `invalid backslash escape sequence`,
+ )
+ }
+ }
+
+ buf = append(buf, b)
+ }
+
+ return string(buf), nil
+}
+
+func (p *parser) ParseInterpolation() (ast.Node, error) {
+ // By the time we're called, we're already "inside" the ${ sequence
+ // because the caller consumed the ${ token.
+
+ expr, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+
+ err = p.requireTokenType(scanner.END, `"}"`)
+ if err != nil {
+ return nil, err
+ }
+
+ return expr, nil
+}
+
+func (p *parser) ParseExpression() (ast.Node, error) {
+ return p.parseTernaryCond()
+}
+
+func (p *parser) parseTernaryCond() (ast.Node, error) {
+ // The ternary condition operator (.. ? .. : ..) behaves somewhat
+ // like a binary operator except that the "operator" is itself
+ // an expression enclosed in two punctuation characters.
+ // The middle expression is parsed as if the ? and : symbols
+ // were parentheses. The "rhs" (the "false expression") is then
+ // treated right-associatively so it behaves similarly to the
+ // middle in terms of precedence.
+
+ startPos := p.peeker.Peek().Pos
+
+ var cond, trueExpr, falseExpr ast.Node
+ var err error
+
+ cond, err = p.parseBinaryOps(binaryOps)
+ if err != nil {
+ return nil, err
+ }
+
+ next := p.peeker.Peek()
+ if next.Type != scanner.QUESTION {
+ return cond, nil
+ }
+
+ p.peeker.Read() // eat question mark
+
+ trueExpr, err = p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+
+ colon := p.peeker.Read()
+ if colon.Type != scanner.COLON {
+ return nil, ExpectationError(":", colon)
+ }
+
+ falseExpr, err = p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+
+ return &ast.Conditional{
+ CondExpr: cond,
+ TrueExpr: trueExpr,
+ FalseExpr: falseExpr,
+ Posx: startPos,
+ }, nil
+}
+
+// parseBinaryOps calls itself recursively to work through all of the
+// operator precedence groups, and then eventually calls ParseExpressionTerm
+// for each operand.
+func (p *parser) parseBinaryOps(ops []map[scanner.TokenType]ast.ArithmeticOp) (ast.Node, error) {
+ if len(ops) == 0 {
+ // We've run out of operators, so now we'll just try to parse a term.
+ return p.ParseExpressionTerm()
+ }
+
+ thisLevel := ops[0]
+ remaining := ops[1:]
+
+ startPos := p.peeker.Peek().Pos
+
+ var lhs, rhs ast.Node
+ operator := ast.ArithmeticOpInvalid
+ var err error
+
+ // parse a term that might be the first operand of a binary
+ // expression or it might just be a standalone term, but
+ // we won't know until we've parsed it and can look ahead
+ // to see if there's an operator token.
+ lhs, err = p.parseBinaryOps(remaining)
+ if err != nil {
+ return nil, err
+ }
+
+ // We'll keep eating up arithmetic operators until we run
+ // out, so that operators with the same precedence will combine in a
+ // left-associative manner:
+ // a+b+c => (a+b)+c, not a+(b+c)
+ //
+ // Should we later want to have right-associative operators, a way
+ // to achieve that would be to call back up to ParseExpression here
+ // instead of iteratively parsing only the remaining operators.
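+	//
+	// For example, parsing "1 + 2 + 3" at this precedence level produces
+	// roughly:
+	//
+	//	&ast.Arithmetic{Op: ast.ArithmeticOpAdd, Exprs: []ast.Node{
+	//		&ast.Arithmetic{...}, // the (1 + 2) sub-expression
+	//		...,                  // the literal 3
+	//	}}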
+ for {
+ next := p.peeker.Peek()
+ var newOperator ast.ArithmeticOp
+ var ok bool
+ if newOperator, ok = thisLevel[next.Type]; !ok {
+ break
+ }
+
+ // Are we extending an expression started on
+ // the previous iteration?
+ if operator != ast.ArithmeticOpInvalid {
+ lhs = &ast.Arithmetic{
+ Op: operator,
+ Exprs: []ast.Node{lhs, rhs},
+ Posx: startPos,
+ }
+ }
+
+ operator = newOperator
+ p.peeker.Read() // eat operator token
+ rhs, err = p.parseBinaryOps(remaining)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if operator != ast.ArithmeticOpInvalid {
+ return &ast.Arithmetic{
+ Op: operator,
+ Exprs: []ast.Node{lhs, rhs},
+ Posx: startPos,
+ }, nil
+ } else {
+ return lhs, nil
+ }
+}
+
+func (p *parser) ParseExpressionTerm() (ast.Node, error) {
+
+ next := p.peeker.Peek()
+
+ switch next.Type {
+
+ case scanner.OPAREN:
+ p.peeker.Read()
+ expr, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ err = p.requireTokenType(scanner.CPAREN, `")"`)
+ return expr, err
+
+ case scanner.OQUOTE:
+ return p.ParseQuoted()
+
+ case scanner.INTEGER:
+ tok := p.peeker.Read()
+ val, err := strconv.Atoi(tok.Content)
+ if err != nil {
+ return nil, TokenErrorf(tok, "invalid integer: %s", err)
+ }
+ return &ast.LiteralNode{
+ Value: val,
+ Typex: ast.TypeInt,
+ Posx: tok.Pos,
+ }, nil
+
+ case scanner.FLOAT:
+ tok := p.peeker.Read()
+ val, err := strconv.ParseFloat(tok.Content, 64)
+ if err != nil {
+ return nil, TokenErrorf(tok, "invalid float: %s", err)
+ }
+ return &ast.LiteralNode{
+ Value: val,
+ Typex: ast.TypeFloat,
+ Posx: tok.Pos,
+ }, nil
+
+ case scanner.BOOL:
+ tok := p.peeker.Read()
+ // the scanner guarantees that tok.Content is either "true" or "false"
+		val := tok.Content[0] == 't'
+ return &ast.LiteralNode{
+ Value: val,
+ Typex: ast.TypeBool,
+ Posx: tok.Pos,
+ }, nil
+
+ case scanner.MINUS:
+ opTok := p.peeker.Read()
+ // important to use ParseExpressionTerm rather than ParseExpression
+ // here, otherwise we can capture a following binary expression into
+ // our negation.
+ // e.g. -46+5 should parse as (0-46)+5, not 0-(46+5)
+ operand, err := p.ParseExpressionTerm()
+ if err != nil {
+ return nil, err
+ }
+ // The AST currently represents negative numbers as
+ // a binary subtraction of the number from zero.
+ return &ast.Arithmetic{
+ Op: ast.ArithmeticOpSub,
+ Exprs: []ast.Node{
+ &ast.LiteralNode{
+ Value: 0,
+ Typex: ast.TypeInt,
+ Posx: opTok.Pos,
+ },
+ operand,
+ },
+ Posx: opTok.Pos,
+ }, nil
+
+ case scanner.BANG:
+ opTok := p.peeker.Read()
+ // important to use ParseExpressionTerm rather than ParseExpression
+ // here, otherwise we can capture a following binary expression into
+ // our negation.
+ operand, err := p.ParseExpressionTerm()
+ if err != nil {
+ return nil, err
+ }
+		// The AST currently represents boolean negation as an equality
+		// test with "false".
+ return &ast.Arithmetic{
+ Op: ast.ArithmeticOpEqual,
+ Exprs: []ast.Node{
+ &ast.LiteralNode{
+ Value: false,
+ Typex: ast.TypeBool,
+ Posx: opTok.Pos,
+ },
+ operand,
+ },
+ Posx: opTok.Pos,
+ }, nil
+
+ case scanner.IDENTIFIER:
+ return p.ParseScopeInteraction()
+
+ default:
+ return nil, ExpectationError("expression", next)
+ }
+}
+
+// ParseScopeInteraction parses the expression types that interact
+// with the evaluation scope: variable access, function calls, and
+// indexing.
+//
+// Indexing should actually be a distinct operator in its own right,
+// so that e.g. it can be applied to the result of a function call,
+// but for now we're preserving the behavior of the older yacc-based
+// parser.
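+//
+// For example, `foo(1)` yields an *ast.Call, `foo.bar` yields an
+// *ast.VariableAccess named "foo.bar", and `foo.bar[0]` yields an
+// *ast.Index whose target is that variable access.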
+func (p *parser) ParseScopeInteraction() (ast.Node, error) {
+ first := p.peeker.Read()
+ startPos := first.Pos
+ if first.Type != scanner.IDENTIFIER {
+ return nil, ExpectationError("identifier", first)
+ }
+
+ next := p.peeker.Peek()
+ if next.Type == scanner.OPAREN {
+ // function call
+ funcName := first.Content
+ p.peeker.Read() // eat paren
+ var args []ast.Node
+
+ for {
+ if p.peeker.Peek().Type == scanner.CPAREN {
+ break
+ }
+
+ arg, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+
+ args = append(args, arg)
+
+ if p.peeker.Peek().Type == scanner.COMMA {
+ p.peeker.Read() // eat comma
+ continue
+ } else {
+ break
+ }
+ }
+
+ err := p.requireTokenType(scanner.CPAREN, `")"`)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ast.Call{
+ Func: funcName,
+ Args: args,
+ Posx: startPos,
+ }, nil
+ }
+
+ varNode := &ast.VariableAccess{
+ Name: first.Content,
+ Posx: startPos,
+ }
+
+ if p.peeker.Peek().Type == scanner.OBRACKET {
+ // index operator
+ startPos := p.peeker.Read().Pos // eat bracket
+ indexExpr, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ err = p.requireTokenType(scanner.CBRACKET, `"]"`)
+ if err != nil {
+ return nil, err
+ }
+ return &ast.Index{
+ Target: varNode,
+ Key: indexExpr,
+ Posx: startPos,
+ }, nil
+ }
+
+ return varNode, nil
+}
+
+// requireTokenType consumes the next token and returns an error if its
+// type does not match the given type. nil is returned if the type matches.
+//
+// This is a helper around peeker.Read() for situations where the parser just
+// wants to assert that a particular token type must be present.
+func (p *parser) requireTokenType(wantType scanner.TokenType, wantName string) error {
+ token := p.peeker.Read()
+ if token.Type != wantType {
+ return ExpectationError(wantName, token)
+ }
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/hil/scanner/peeker.go b/vendor/github.com/hashicorp/hil/scanner/peeker.go
new file mode 100644
index 00000000..4de37283
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/peeker.go
@@ -0,0 +1,55 @@
+package scanner
+
+// Peeker is a utility that wraps a token channel returned by Scan,
+// giving a caller (e.g. the parser) one token of lookahead along with
+// utilities for more convenient processing of the stream.
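+//
+// A typical usage sketch:
+//
+//	p := NewPeeker(Scan(`hello ${name}`, ast.InitPos))
+//	defer p.Close()
+//	for p.Peek().Type != EOF {
+//		tok := p.Read()
+//		_ = tok // ... process the token ...
+//	}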
+type Peeker struct {
+ ch <-chan *Token
+ peeked *Token
+}
+
+func NewPeeker(ch <-chan *Token) *Peeker {
+ return &Peeker{
+ ch: ch,
+ }
+}
+
+// Peek returns the next token in the stream without consuming it. A
+// subsequent call to Read will return the same token.
+func (p *Peeker) Peek() *Token {
+ if p.peeked == nil {
+ p.peeked = <-p.ch
+ }
+ return p.peeked
+}
+
+// Read consumes the next token in the stream and returns it.
+func (p *Peeker) Read() *Token {
+ token := p.Peek()
+
+ // As a special case, we will produce the EOF token forever once
+ // it is reached.
+ if token.Type != EOF {
+ p.peeked = nil
+ }
+
+ return token
+}
+
+// Close ensures that the token stream has been exhausted, to prevent
+// the goroutine in the underlying scanner from leaking.
+//
+// It's not necessary to call this if the caller reads the token stream
+// to EOF, since that implicitly closes the scanner.
+func (p *Peeker) Close() {
+	for range p.ch {
+ // discard
+ }
+ // Install a synthetic EOF token in 'peeked' in case someone
+ // erroneously calls Peek() or Read() after we've closed.
+ p.peeked = &Token{
+ Type: EOF,
+ Content: "",
+ }
+}
diff --git a/vendor/github.com/hashicorp/hil/scanner/scanner.go b/vendor/github.com/hashicorp/hil/scanner/scanner.go
new file mode 100644
index 00000000..bab86c67
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/scanner.go
@@ -0,0 +1,550 @@
+package scanner
+
+import (
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+// Scan returns a channel that receives Tokens from the given input string.
+//
+// The scanner's job is just to partition the string into meaningful parts.
+// It doesn't do any transformation of the raw input string, so the caller
+// must deal with any further interpretation required, such as parsing INTEGER
+// tokens into real ints, or dealing with escape sequences in LITERAL or
+// STRING tokens.
+//
+// Strings in the returned tokens are slices from the original string.
+//
+// startPos should be set to ast.InitPos unless the caller knows that
+// this interpolation string is part of a larger file and knows the position
+// of the first character in that larger file.
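+//
+// For example, scanning `a ${b} c` yields the token stream
+// LITERAL("a "), BEGIN, IDENTIFIER("b"), END, LITERAL(" c"), EOF:
+//
+//	for tok := range Scan(`a ${b} c`, ast.InitPos) {
+//		fmt.Println(tok)
+//	}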
+func Scan(s string, startPos ast.Pos) <-chan *Token {
+ ch := make(chan *Token)
+ go scan(s, ch, startPos)
+ return ch
+}
+
+func scan(s string, ch chan<- *Token, pos ast.Pos) {
+ // 'remain' starts off as the whole string but we gradually
+	// slice off the front of it as we work our way through.
+ remain := s
+
+ // nesting keeps track of how many ${ .. } sequences we are
+ // inside, so we can recognize the minor differences in syntax
+ // between outer string literals (LITERAL tokens) and quoted
+ // string literals (STRING tokens).
+ nesting := 0
+
+ // We're going to flip back and forth between parsing literals/strings
+ // and parsing interpolation sequences ${ .. } until we reach EOF or
+ // some INVALID token.
+All:
+ for {
+ startPos := pos
+ // Literal string processing first, since the beginning of
+ // a string is always outside of an interpolation sequence.
+ literalVal, terminator := scanLiteral(remain, pos, nesting > 0)
+
+ if len(literalVal) > 0 {
+ litType := LITERAL
+ if nesting > 0 {
+ litType = STRING
+ }
+ ch <- &Token{
+ Type: litType,
+ Content: literalVal,
+ Pos: startPos,
+ }
+ remain = remain[len(literalVal):]
+ }
+
+ ch <- terminator
+ remain = remain[len(terminator.Content):]
+ pos = terminator.Pos
+		// Safe to use len() here because none of the terminator tokens
+		// can contain multi-byte UTF-8 sequences.
+ pos.Column = pos.Column + len(terminator.Content)
+
+ switch terminator.Type {
+ case INVALID:
+ // Synthetic EOF after invalid token, since further scanning
+ // is likely to just produce more garbage.
+ ch <- &Token{
+ Type: EOF,
+ Content: "",
+ Pos: pos,
+ }
+ break All
+ case EOF:
+ // All done!
+ break All
+ case BEGIN:
+ nesting++
+ case CQUOTE:
+ // nothing special to do
+ default:
+ // Should never happen
+ panic("invalid string/literal terminator")
+ }
+
+ // Now we do the processing of the insides of ${ .. } sequences.
+ // This loop terminates when we encounter either a closing } or
+ // an opening ", which will cause us to return to literal processing.
+ Interpolation:
+ for {
+
+ token, size, newPos := scanInterpolationToken(remain, pos)
+ ch <- token
+ remain = remain[size:]
+ pos = newPos
+
+ switch token.Type {
+ case INVALID:
+ // Synthetic EOF after invalid token, since further scanning
+ // is likely to just produce more garbage.
+ ch <- &Token{
+ Type: EOF,
+ Content: "",
+ Pos: pos,
+ }
+ break All
+ case EOF:
+ // All done
+ // (though a syntax error that we'll catch in the parser)
+ break All
+ case END:
+ nesting--
+ if nesting < 0 {
+ // Can happen if there are unbalanced ${ and } sequences
+ // in the input, which we'll catch in the parser.
+ nesting = 0
+ }
+ break Interpolation
+ case OQUOTE:
+ // Beginning of nested quoted string
+ break Interpolation
+ }
+ }
+ }
+
+ close(ch)
+}
+
+// scanInterpolationToken returns the token found at the start of the given
+// string, followed by the number of bytes that were consumed from the
+// string and the adjusted source position.
+//
+// Note that the number of bytes consumed can be more than the length of
+// the returned token contents if the string begins with whitespace, since
+// it will be silently consumed before reading the token.
+func scanInterpolationToken(s string, startPos ast.Pos) (*Token, int, ast.Pos) {
+ pos := startPos
+ size := 0
+
+ // Consume whitespace, if any
+ for len(s) > 0 && byteIsSpace(s[0]) {
+ if s[0] == '\n' {
+ pos.Column = 1
+ pos.Line++
+ } else {
+ pos.Column++
+ }
+ size++
+ s = s[1:]
+ }
+
+ // Unexpected EOF during sequence
+ if len(s) == 0 {
+ return &Token{
+ Type: EOF,
+ Content: "",
+ Pos: pos,
+ }, size, pos
+ }
+
+ next := s[0]
+ var token *Token
+
+ switch next {
+ case '(', ')', '[', ']', ',', '.', '+', '-', '*', '/', '%', '?', ':':
+ // Easy punctuation symbols that don't have any special meaning
+ // during scanning, and that stand for themselves in the
+ // TokenType enumeration.
+ token = &Token{
+ Type: TokenType(next),
+ Content: s[:1],
+ Pos: pos,
+ }
+ case '}':
+ token = &Token{
+ Type: END,
+ Content: s[:1],
+ Pos: pos,
+ }
+ case '"':
+ token = &Token{
+ Type: OQUOTE,
+ Content: s[:1],
+ Pos: pos,
+ }
+ case '!':
+ if len(s) >= 2 && s[:2] == "!=" {
+ token = &Token{
+ Type: NOTEQUAL,
+ Content: s[:2],
+ Pos: pos,
+ }
+ } else {
+ token = &Token{
+ Type: BANG,
+ Content: s[:1],
+ Pos: pos,
+ }
+ }
+ case '<':
+ if len(s) >= 2 && s[:2] == "<=" {
+ token = &Token{
+ Type: LTE,
+ Content: s[:2],
+ Pos: pos,
+ }
+ } else {
+ token = &Token{
+ Type: LT,
+ Content: s[:1],
+ Pos: pos,
+ }
+ }
+ case '>':
+ if len(s) >= 2 && s[:2] == ">=" {
+ token = &Token{
+ Type: GTE,
+ Content: s[:2],
+ Pos: pos,
+ }
+ } else {
+ token = &Token{
+ Type: GT,
+ Content: s[:1],
+ Pos: pos,
+ }
+ }
+ case '=':
+ if len(s) >= 2 && s[:2] == "==" {
+ token = &Token{
+ Type: EQUAL,
+ Content: s[:2],
+ Pos: pos,
+ }
+ } else {
+ // A single equals is not a valid operator
+ token = &Token{
+ Type: INVALID,
+ Content: s[:1],
+ Pos: pos,
+ }
+ }
+ case '&':
+ if len(s) >= 2 && s[:2] == "&&" {
+ token = &Token{
+ Type: AND,
+ Content: s[:2],
+ Pos: pos,
+ }
+ } else {
+ token = &Token{
+ Type: INVALID,
+ Content: s[:1],
+ Pos: pos,
+ }
+ }
+ case '|':
+ if len(s) >= 2 && s[:2] == "||" {
+ token = &Token{
+ Type: OR,
+ Content: s[:2],
+ Pos: pos,
+ }
+ } else {
+ token = &Token{
+ Type: INVALID,
+ Content: s[:1],
+ Pos: pos,
+ }
+ }
+ default:
+ if next >= '0' && next <= '9' {
+ num, numType := scanNumber(s)
+ token = &Token{
+ Type: numType,
+ Content: num,
+ Pos: pos,
+ }
+ } else if stringStartsWithIdentifier(s) {
+ ident, runeLen := scanIdentifier(s)
+ tokenType := IDENTIFIER
+ if ident == "true" || ident == "false" {
+ tokenType = BOOL
+ }
+ token = &Token{
+ Type: tokenType,
+ Content: ident,
+ Pos: pos,
+ }
+ // Skip usual token handling because it doesn't
+ // know how to deal with UTF-8 sequences.
+ pos.Column = pos.Column + runeLen
+ return token, size + len(ident), pos
+ } else {
+ _, byteLen := utf8.DecodeRuneInString(s)
+ token = &Token{
+ Type: INVALID,
+ Content: s[:byteLen],
+ Pos: pos,
+ }
+ // Skip usual token handling because it doesn't
+ // know how to deal with UTF-8 sequences.
+ pos.Column = pos.Column + 1
+ return token, size + byteLen, pos
+ }
+ }
+
+	// Here we assume that the token content contains no multi-byte UTF-8
+	// sequences, because we dealt with UTF-8 characters as a special case
+	// where necessary above.
+ size = size + len(token.Content)
+ pos.Column = pos.Column + len(token.Content)
+
+ return token, size, pos
+}
+
+// scanLiteral returns the (possibly-empty) prefix of the given string
+// that represents a literal, followed by the token that marks the end of
+// the literal.
+func scanLiteral(s string, startPos ast.Pos, nested bool) (string, *Token) {
+ litLen := 0
+ pos := startPos
+ var terminator *Token
+ for {
+
+ if litLen >= len(s) {
+ if nested {
+ // We've ended in the middle of a quoted string,
+ // which means this token is actually invalid.
+ return "", &Token{
+ Type: INVALID,
+ Content: s,
+ Pos: startPos,
+ }
+ }
+ terminator = &Token{
+ Type: EOF,
+ Content: "",
+ Pos: pos,
+ }
+ break
+ }
+
+ next := s[litLen]
+
+ if next == '$' && len(s) > litLen+1 {
+ follow := s[litLen+1]
+
+ if follow == '{' {
+ terminator = &Token{
+ Type: BEGIN,
+ Content: s[litLen : litLen+2],
+ Pos: pos,
+ }
+ pos.Column = pos.Column + 2
+ break
+ } else if follow == '$' {
+ // Double-$ escapes the special processing of $,
+ // so we will consume both characters here.
+ pos.Column = pos.Column + 2
+ litLen = litLen + 2
+ continue
+ }
+ }
+
+ // special handling that applies only to quoted strings
+ if nested {
+ if next == '"' {
+ terminator = &Token{
+ Type: CQUOTE,
+ Content: s[litLen : litLen+1],
+ Pos: pos,
+ }
+ pos.Column = pos.Column + 1
+ break
+ }
+
+ // Escaped quote marks do not terminate the string.
+ //
+ // All we do here in the scanner is avoid terminating a string
+ // due to an escaped quote. The parser is responsible for the
+ // full handling of escape sequences, since it's able to produce
+ // better error messages than we can produce in here.
+ if next == '\\' && len(s) > litLen+1 {
+ follow := s[litLen+1]
+
+ if follow == '"' {
+ // \" escapes the special processing of ",
+ // so we will consume both characters here.
+ pos.Column = pos.Column + 2
+ litLen = litLen + 2
+ continue
+ }
+ }
+ }
+
+ if next == '\n' {
+ pos.Column = 1
+ pos.Line++
+ litLen++
+ } else {
+ pos.Column++
+
+ // "Column" measures runes, so we need to actually consume
+ // a valid UTF-8 character here.
+ _, size := utf8.DecodeRuneInString(s[litLen:])
+ litLen = litLen + size
+ }
+
+ }
+
+ return s[:litLen], terminator
+}
+
+// scanNumber returns the extent of the prefix of the string that represents
+// a valid number, along with what type of number it represents: INTEGER
+// or FLOAT.
+//
+// scanNumber does only basic character analysis: numbers consist of digits
+// and periods, with at least one period signalling a FLOAT. It's the parser's
+// responsibility to validate the form and range of the number, such as ensuring
+// that a FLOAT actually contains only one period, etc.
+func scanNumber(s string) (string, TokenType) {
+ period := -1
+ byteLen := 0
+ numType := INTEGER
+ for {
+ if byteLen >= len(s) {
+ break
+ }
+
+ next := s[byteLen]
+ if next != '.' && (next < '0' || next > '9') {
+ // If our last value was a period, then we're not a float,
+ // we're just an integer that ends in a period.
+ if period == byteLen-1 {
+ byteLen--
+ numType = INTEGER
+ }
+
+ break
+ }
+
+ if next == '.' {
+ // If we've already seen a period, break out
+ if period >= 0 {
+ break
+ }
+
+ period = byteLen
+ numType = FLOAT
+ }
+
+ byteLen++
+ }
+
+ return s[:byteLen], numType
+}
+
+// scanIdentifier returns the extent of the prefix of the string that
+// represents a valid identifier, along with the length of that prefix
+// in runes.
+//
+// Identifiers may contain utf8-encoded non-Latin letters, which will
+// cause the returned "rune length" to be shorter than the byte length
+// of the returned string.
+func scanIdentifier(s string) (string, int) {
+ byteLen := 0
+ runeLen := 0
+ for {
+ if byteLen >= len(s) {
+ break
+ }
+
+ nextRune, size := utf8.DecodeRuneInString(s[byteLen:])
+ if !(nextRune == '_' ||
+ nextRune == '-' ||
+ nextRune == '.' ||
+ nextRune == '*' ||
+ unicode.IsNumber(nextRune) ||
+ unicode.IsLetter(nextRune) ||
+ unicode.IsMark(nextRune)) {
+ break
+ }
+
+ // If we reach a star, it must be between periods to be part
+ // of the same identifier.
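+		// (byteLen is always nonzero here: scanIdentifier is only
+		// called when the string starts with a valid identifier
+		// character, so a '*' can never be the first byte examined.)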
+ if nextRune == '*' && s[byteLen-1] != '.' {
+ break
+ }
+
+		// If our previous character was a star, then the current must
+		// be a period. Otherwise, undo that and exit.
+ if byteLen > 0 && s[byteLen-1] == '*' && nextRune != '.' {
+ byteLen--
+ if s[byteLen-1] == '.' {
+ byteLen--
+ }
+
+ break
+ }
+
+ byteLen = byteLen + size
+ runeLen = runeLen + 1
+ }
+
+ return s[:byteLen], runeLen
+}
+
+// byteIsSpace implements a restrictive interpretation of spaces that
+// includes only what's valid inside interpolation sequences: spaces,
+// tabs, carriage returns, and newlines.
+func byteIsSpace(b byte) bool {
+ switch b {
+ case ' ', '\t', '\r', '\n':
+ return true
+ default:
+ return false
+ }
+}
+
+// stringStartsWithIdentifier returns true if the given string begins with
+// a character that is a legal start of an identifier: an underscore or
+// any character that Unicode considers to be a letter.
+func stringStartsWithIdentifier(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+
+ first := s[0]
+
+ // Easy ASCII cases first
+ if (first >= 'a' && first <= 'z') || (first >= 'A' && first <= 'Z') || first == '_' {
+ return true
+ }
+
+ // If our first byte begins a UTF-8 sequence then the sequence might
+ // be a unicode letter.
+ if utf8.RuneStart(first) {
+ firstRune, _ := utf8.DecodeRuneInString(s)
+ if unicode.IsLetter(firstRune) {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/hashicorp/hil/scanner/token.go b/vendor/github.com/hashicorp/hil/scanner/token.go
new file mode 100644
index 00000000..b6c82ae9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/token.go
@@ -0,0 +1,105 @@
+package scanner
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+type Token struct {
+ Type TokenType
+ Content string
+ Pos ast.Pos
+}
+
+//go:generate stringer -type=TokenType
+type TokenType rune
+
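+// TokenType values are distinct runes: single-character punctuation
+// tokens stand for themselves, while multi-character and synthetic
+// tokens use visually suggestive non-ASCII runes.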
+const (
+ // Raw string data outside of ${ .. } sequences
+ LITERAL TokenType = 'o'
+
+ // STRING is like a LITERAL but it's inside a quoted string
+ // within a ${ ... } sequence, and so it can contain backslash
+ // escaping.
+ STRING TokenType = 'S'
+
+ // Other Literals
+ INTEGER TokenType = 'I'
+ FLOAT TokenType = 'F'
+ BOOL TokenType = 'B'
+
+ BEGIN TokenType = '$' // actually "${"
+ END TokenType = '}'
+ OQUOTE TokenType = '“' // Opening quote of a nested quoted sequence
+ CQUOTE TokenType = '”' // Closing quote of a nested quoted sequence
+ OPAREN TokenType = '('
+ CPAREN TokenType = ')'
+ OBRACKET TokenType = '['
+ CBRACKET TokenType = ']'
+ COMMA TokenType = ','
+
+ IDENTIFIER TokenType = 'i'
+
+ PERIOD TokenType = '.'
+ PLUS TokenType = '+'
+ MINUS TokenType = '-'
+ STAR TokenType = '*'
+ SLASH TokenType = '/'
+ PERCENT TokenType = '%'
+
+ AND TokenType = '∧'
+ OR TokenType = '∨'
+ BANG TokenType = '!'
+
+ EQUAL TokenType = '='
+ NOTEQUAL TokenType = '≠'
+ GT TokenType = '>'
+ LT TokenType = '<'
+ GTE TokenType = '≥'
+ LTE TokenType = '≤'
+
+ QUESTION TokenType = '?'
+ COLON TokenType = ':'
+
+ EOF TokenType = '␄'
+
+ // Produced for sequences that cannot be understood as valid tokens
+ // e.g. due to use of unrecognized punctuation.
+ INVALID TokenType = '�'
+)
+
+func (t *Token) String() string {
+ switch t.Type {
+ case EOF:
+ return "end of string"
+ case INVALID:
+ return fmt.Sprintf("invalid sequence %q", t.Content)
+ case INTEGER:
+ return fmt.Sprintf("integer %s", t.Content)
+ case FLOAT:
+ return fmt.Sprintf("float %s", t.Content)
+ case STRING:
+ return fmt.Sprintf("string %q", t.Content)
+ case LITERAL:
+ return fmt.Sprintf("literal %q", t.Content)
+ case OQUOTE:
+		return "opening quote"
+	case CQUOTE:
+		return "closing quote"
+ case AND:
+ return "&&"
+ case OR:
+ return "||"
+ case NOTEQUAL:
+ return "!="
+ case GTE:
+ return ">="
+ case LTE:
+ return "<="
+ default:
+ // The remaining token types have content that
+ // speaks for itself.
+ return fmt.Sprintf("%q", t.Content)
+ }
+}
diff --git a/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go b/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go
new file mode 100644
index 00000000..a602f5fd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go
@@ -0,0 +1,51 @@
+// Code generated by "stringer -type=TokenType"; DO NOT EDIT
+
+package scanner
+
+import "fmt"
+
+const _TokenType_name = "BANGBEGINPERCENTOPARENCPARENSTARPLUSCOMMAMINUSPERIODSLASHCOLONLTEQUALGTQUESTIONBOOLFLOATINTEGERSTRINGOBRACKETCBRACKETIDENTIFIERLITERALENDOQUOTECQUOTEANDORNOTEQUALLTEGTEEOFINVALID"
+
+var _TokenType_map = map[TokenType]string{
+ 33: _TokenType_name[0:4],
+ 36: _TokenType_name[4:9],
+ 37: _TokenType_name[9:16],
+ 40: _TokenType_name[16:22],
+ 41: _TokenType_name[22:28],
+ 42: _TokenType_name[28:32],
+ 43: _TokenType_name[32:36],
+ 44: _TokenType_name[36:41],
+ 45: _TokenType_name[41:46],
+ 46: _TokenType_name[46:52],
+ 47: _TokenType_name[52:57],
+ 58: _TokenType_name[57:62],
+ 60: _TokenType_name[62:64],
+ 61: _TokenType_name[64:69],
+ 62: _TokenType_name[69:71],
+ 63: _TokenType_name[71:79],
+ 66: _TokenType_name[79:83],
+ 70: _TokenType_name[83:88],
+ 73: _TokenType_name[88:95],
+ 83: _TokenType_name[95:101],
+ 91: _TokenType_name[101:109],
+ 93: _TokenType_name[109:117],
+ 105: _TokenType_name[117:127],
+ 111: _TokenType_name[127:134],
+ 125: _TokenType_name[134:137],
+ 8220: _TokenType_name[137:143],
+ 8221: _TokenType_name[143:149],
+ 8743: _TokenType_name[149:152],
+ 8744: _TokenType_name[152:154],
+ 8800: _TokenType_name[154:162],
+ 8804: _TokenType_name[162:165],
+ 8805: _TokenType_name[165:168],
+ 9220: _TokenType_name[168:171],
+ 65533: _TokenType_name[171:178],
+}
+
+func (i TokenType) String() string {
+ if str, ok := _TokenType_map[i]; ok {
+ return str
+ }
+ return fmt.Sprintf("TokenType(%d)", i)
+}
diff --git a/vendor/github.com/hashicorp/hil/transform_fixed.go b/vendor/github.com/hashicorp/hil/transform_fixed.go
new file mode 100644
index 00000000..e69df294
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/transform_fixed.go
@@ -0,0 +1,29 @@
+package hil
+
+import (
+ "github.com/hashicorp/hil/ast"
+)
+
+// FixedValueTransform transforms an AST to return a fixed value for
+// all interpolations. i.e. you can make "hi ${anything}" always
+// turn into "hi foo".
+//
+// The primary use case for this is for config validations where you can
+// verify that interpolations result in a certain type of string.
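+//
+// A minimal sketch, assuming "root" already holds a parsed tree:
+//
+//	fixed := FixedValueTransform(root, &ast.LiteralNode{
+//		Value: "foo",
+//		Typex: ast.TypeString,
+//	})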
+func FixedValueTransform(root ast.Node, Value *ast.LiteralNode) ast.Node {
+ // We visit the nodes in top-down order
+ result := root
+ switch n := result.(type) {
+ case *ast.Output:
+ for i, v := range n.Exprs {
+ n.Exprs[i] = FixedValueTransform(v, Value)
+ }
+ case *ast.LiteralNode:
+ // We keep it as-is
+ default:
+ // Anything else we replace
+ result = Value
+ }
+
+ return result
+}
diff --git a/vendor/github.com/hashicorp/hil/walk.go b/vendor/github.com/hashicorp/hil/walk.go
new file mode 100644
index 00000000..0ace8306
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/walk.go
@@ -0,0 +1,266 @@
+package hil
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/hashicorp/hil/ast"
+ "github.com/mitchellh/reflectwalk"
+)
+
+// WalkFn is the type of function to pass to Walk. Modify fields within
+// WalkData to control whether replacement happens.
+type WalkFn func(*WalkData) error
+
+// WalkData is the structure passed to the callback of the Walk function.
+//
+// This structure contains data passed in as well as fields that are expected
+// to be written by the caller as a result. Please see the documentation for
+// each field for more information.
+type WalkData struct {
+ // Root is the parsed root of this HIL program
+ Root ast.Node
+
+ // Location is the location within the structure where this
+ // value was found. This can be used to modify behavior within
+ // slices and so on.
+ Location reflectwalk.Location
+
+ // The below two values must be set by the callback to have any effect.
+ //
+ // Replace, if true, will replace the value in the structure with
+ // ReplaceValue. It is up to the caller to make sure this is a string.
+ Replace bool
+ ReplaceValue string
+}
+
+// Walk will walk an arbitrary Go structure and parse any string as an
+// HIL program and call the callback cb to determine what to replace it
+// with.
+//
+// This function is very useful for arbitrary HIL program interpolation
+// across a complex configuration structure. Due to the heavy use of
+// reflection in this function, it is recommended to write many unit tests
+// with your typical configuration structures to help mitigate the risk
+// of panics.
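+//
+// A minimal usage sketch (the configuration value is hypothetical):
+//
+//	cfg := map[string]interface{}{"greeting": "hi ${var.name}"}
+//	err := Walk(&cfg, func(d *WalkData) error {
+//		d.Replace = true
+//		d.ReplaceValue = "hi world"
+//		return nil
+//	})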
+func Walk(v interface{}, cb WalkFn) error {
+ walker := &interpolationWalker{F: cb}
+ return reflectwalk.Walk(v, walker)
+}
+
+// interpolationWalker implements interfaces for the reflectwalk package
+// (github.com/mitchellh/reflectwalk) that can be used to automatically
+// execute a callback for an interpolation.
+type interpolationWalker struct {
+ F WalkFn
+
+ key []string
+ lastValue reflect.Value
+ loc reflectwalk.Location
+ cs []reflect.Value
+ csKey []reflect.Value
+ csData interface{}
+ sliceIndex int
+ unknownKeys []string
+}
+
+func (w *interpolationWalker) Enter(loc reflectwalk.Location) error {
+ w.loc = loc
+ return nil
+}
+
+func (w *interpolationWalker) Exit(loc reflectwalk.Location) error {
+ w.loc = reflectwalk.None
+
+ switch loc {
+ case reflectwalk.Map:
+ w.cs = w.cs[:len(w.cs)-1]
+ case reflectwalk.MapValue:
+ w.key = w.key[:len(w.key)-1]
+ w.csKey = w.csKey[:len(w.csKey)-1]
+ case reflectwalk.Slice:
+ // Split any values that need to be split
+ w.splitSlice()
+ w.cs = w.cs[:len(w.cs)-1]
+ case reflectwalk.SliceElem:
+ w.csKey = w.csKey[:len(w.csKey)-1]
+ }
+
+ return nil
+}
+
+func (w *interpolationWalker) Map(m reflect.Value) error {
+ w.cs = append(w.cs, m)
+ return nil
+}
+
+func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error {
+ w.csData = k
+ w.csKey = append(w.csKey, k)
+ w.key = append(w.key, k.String())
+ w.lastValue = v
+ return nil
+}
+
+func (w *interpolationWalker) Slice(s reflect.Value) error {
+ w.cs = append(w.cs, s)
+ return nil
+}
+
+func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error {
+ w.csKey = append(w.csKey, reflect.ValueOf(i))
+ w.sliceIndex = i
+ return nil
+}
+
+func (w *interpolationWalker) Primitive(v reflect.Value) error {
+ setV := v
+
+ // We only care about strings
+	if v.Kind() == reflect.Interface {
+		v = v.Elem()
+	}
+ if v.Kind() != reflect.String {
+ return nil
+ }
+
+ astRoot, err := Parse(v.String())
+ if err != nil {
+ return err
+ }
+
+ // If the AST we got is just a literal string value with the same
+	// value then we ignore it. We have to check if it's the same value
+ // because it is possible to input a string, get out a string, and
+ // have it be different. For example: "foo-$${bar}" turns into
+ // "foo-${bar}"
+ if n, ok := astRoot.(*ast.LiteralNode); ok {
+ if s, ok := n.Value.(string); ok && s == v.String() {
+ return nil
+ }
+ }
+
+ if w.F == nil {
+ return nil
+ }
+
+ data := WalkData{Root: astRoot, Location: w.loc}
+ if err := w.F(&data); err != nil {
+ return fmt.Errorf(
+ "%s in:\n\n%s",
+ err, v.String())
+ }
+
+ if data.Replace {
+ /*
+ if remove {
+ w.removeCurrent()
+ return nil
+ }
+ */
+
+ resultVal := reflect.ValueOf(data.ReplaceValue)
+ switch w.loc {
+ case reflectwalk.MapKey:
+ m := w.cs[len(w.cs)-1]
+
+ // Delete the old value
+ var zero reflect.Value
+ m.SetMapIndex(w.csData.(reflect.Value), zero)
+
+ // Set the new key with the existing value
+ m.SetMapIndex(resultVal, w.lastValue)
+
+ // Set the key to be the new key
+ w.csData = resultVal
+ case reflectwalk.MapValue:
+ // If we're in a map, then the only way to set a map value is
+ // to set it directly.
+ m := w.cs[len(w.cs)-1]
+ mk := w.csData.(reflect.Value)
+ m.SetMapIndex(mk, resultVal)
+ default:
+ // Otherwise, we should be addressable
+ setV.Set(resultVal)
+ }
+ }
+
+ return nil
+}
+
+func (w *interpolationWalker) removeCurrent() {
+ // Append the key to the unknown keys
+ w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, "."))
+
+ for i := 1; i <= len(w.cs); i++ {
+ c := w.cs[len(w.cs)-i]
+ switch c.Kind() {
+ case reflect.Map:
+ // Zero value so that we delete the map key
+ var val reflect.Value
+
+ // Get the key and delete it
+ k := w.csData.(reflect.Value)
+ c.SetMapIndex(k, val)
+ return
+ }
+ }
+
+ panic("No container found for removeCurrent")
+}
+
+func (w *interpolationWalker) replaceCurrent(v reflect.Value) {
+ c := w.cs[len(w.cs)-2]
+ switch c.Kind() {
+ case reflect.Map:
+ // Get the key and delete it
+ k := w.csKey[len(w.csKey)-1]
+ c.SetMapIndex(k, v)
+ }
+}
+
+func (w *interpolationWalker) splitSlice() {
+ // Get the []interface{} slice so we can do some operations on
+ // it without dealing with reflection. We'll document each step
+ // here to be clear.
+ var s []interface{}
+ raw := w.cs[len(w.cs)-1]
+ switch v := raw.Interface().(type) {
+ case []interface{}:
+ s = v
+ case []map[string]interface{}:
+ return
+ default:
+ panic("Unknown kind: " + raw.Kind().String())
+ }
+
+	// Check if we have any elements that we need to split. If not, then
+	// just return since we're done.
+	//
+	// NOTE: "split" is never set to true below, so this function
+	// currently always returns here and the remaining code is
+	// effectively disabled.
+	split := false
+	if !split {
+		return
+	}
+
+ // Make a new result slice that is twice the capacity to fit our growth.
+ result := make([]interface{}, 0, len(s)*2)
+
+ // Go over each element of the original slice and start building up
+ // the resulting slice by splitting where we have to.
+ for _, v := range s {
+ sv, ok := v.(string)
+ if !ok {
+ // Not a string, so just set it
+ result = append(result, v)
+ continue
+ }
+
+ // Not a string list, so just set it
+ result = append(result, sv)
+ }
+
+ // Our slice is now done, we have to replace the slice now
+ // with this new one that we have.
+ w.replaceCurrent(reflect.ValueOf(result))
+}
diff --git a/vendor/github.com/hashicorp/logutils/LICENSE b/vendor/github.com/hashicorp/logutils/LICENSE
new file mode 100644
index 00000000..c33dcc7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/logutils/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/logutils/README.md b/vendor/github.com/hashicorp/logutils/README.md
new file mode 100644
index 00000000..49490eae
--- /dev/null
+++ b/vendor/github.com/hashicorp/logutils/README.md
@@ -0,0 +1,36 @@
+# logutils
+
+logutils is a Go package that augments the standard library "log" package
+to make logging a bit more modern, without fragmenting the Go ecosystem
+with new logging packages.
+
+## The simplest thing that could possibly work
+
+Presumably your application already uses the default `log` package. To switch, you'll want your code to look like the following:
+
+```go
+package main
+
+import (
+ "log"
+ "os"
+
+ "github.com/hashicorp/logutils"
+)
+
+func main() {
+ filter := &logutils.LevelFilter{
+ Levels: []logutils.LogLevel{"DEBUG", "WARN", "ERROR"},
+ MinLevel: logutils.LogLevel("WARN"),
+ Writer: os.Stderr,
+ }
+ log.SetOutput(filter)
+
+ log.Print("[DEBUG] Debugging") // this will not print
+ log.Print("[WARN] Warning") // this will
+ log.Print("[ERROR] Erring") // and so will this
+ log.Print("Message I haven't updated") // and so will this
+}
+```
+
+This logs to standard error exactly like Go's standard logger. Any log messages you haven't converted to have a level will continue to print as before.
diff --git a/vendor/github.com/hashicorp/logutils/level.go b/vendor/github.com/hashicorp/logutils/level.go
new file mode 100644
index 00000000..6381bf16
--- /dev/null
+++ b/vendor/github.com/hashicorp/logutils/level.go
@@ -0,0 +1,81 @@
+// Package logutils augments the standard log package with levels.
+package logutils
+
+import (
+ "bytes"
+ "io"
+ "sync"
+)
+
+type LogLevel string
+
+// LevelFilter is an io.Writer that can be used with a logger that
+// will filter out log messages that aren't at least a certain level.
+//
+// Once the filter is in use somewhere, it is not safe to modify
+// the structure.
+type LevelFilter struct {
+ // Levels is the list of log levels, in increasing order of
+ // severity. Example might be: {"DEBUG", "WARN", "ERROR"}.
+ Levels []LogLevel
+
+ // MinLevel is the minimum level allowed through
+ MinLevel LogLevel
+
+ // The underlying io.Writer where log messages that pass the filter
+	// will be written.
+ Writer io.Writer
+
+ badLevels map[LogLevel]struct{}
+ once sync.Once
+}
+
+// Check reports whether the given line would be allowed through the
+// level filter.
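+//
+// For example, with Levels {"DEBUG", "WARN", "ERROR"} and MinLevel "WARN",
+// Check([]byte("[DEBUG] x")) returns false, while "[WARN] x", "[ERROR] x",
+// and lines without a recognized level tag all pass.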
+func (f *LevelFilter) Check(line []byte) bool {
+ f.once.Do(f.init)
+
+ // Check for a log level
+ var level LogLevel
+ x := bytes.IndexByte(line, '[')
+ if x >= 0 {
+ y := bytes.IndexByte(line[x:], ']')
+ if y >= 0 {
+ level = LogLevel(line[x+1 : x+y])
+ }
+ }
+
+ _, ok := f.badLevels[level]
+ return !ok
+}
+
+func (f *LevelFilter) Write(p []byte) (n int, err error) {
+ // Note in general that io.Writer can receive any byte sequence
+ // to write, but the "log" package always guarantees that we only
+ // get a single line. We use that as a slight optimization within
+ // this method, assuming we're dealing with a single, complete line
+ // of log data.
+
+ if !f.Check(p) {
+ return len(p), nil
+ }
+
+ return f.Writer.Write(p)
+}
+
+// SetMinLevel is used to update the minimum log level
+func (f *LevelFilter) SetMinLevel(min LogLevel) {
+ f.MinLevel = min
+ f.init()
+}
+
+func (f *LevelFilter) init() {
+ badLevels := make(map[LogLevel]struct{})
+ for _, level := range f.Levels {
+ if level == f.MinLevel {
+ break
+ }
+ badLevels[level] = struct{}{}
+ }
+ f.badLevels = badLevels
+}
diff --git a/vendor/github.com/hashicorp/terraform/LICENSE b/vendor/github.com/hashicorp/terraform/LICENSE
new file mode 100644
index 00000000..c33dcc7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/terraform/README.md b/vendor/github.com/hashicorp/terraform/README.md
new file mode 100644
index 00000000..351cf0e8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/README.md
@@ -0,0 +1,164 @@
+Terraform
+=========
+
+- Website: https://www.terraform.io
+- [![Gitter chat](https://badges.gitter.im/hashicorp-terraform/Lobby.png)](https://gitter.im/hashicorp-terraform/Lobby)
+- Mailing list: [Google Groups](http://groups.google.com/group/terraform-tool)
+
+![Terraform](https://rawgithub.com/hashicorp/terraform/master/website/source/assets/images/logo-hashicorp.svg)
+
+Terraform is a tool for building, changing, and versioning infrastructure safely and efficiently. Terraform can manage existing and popular service providers as well as custom in-house solutions.
+
+The key features of Terraform are:
+
+- **Infrastructure as Code**: Infrastructure is described using a high-level configuration syntax. This allows a blueprint of your datacenter to be versioned and treated as you would any other code. Additionally, infrastructure can be shared and re-used.
+
+- **Execution Plans**: Terraform has a "planning" step where it generates an *execution plan*. The execution plan shows what Terraform will do when you call apply. This lets you avoid any surprises when Terraform manipulates infrastructure.
+
+- **Resource Graph**: Terraform builds a graph of all your resources, and parallelizes the creation and modification of any non-dependent resources. Because of this, Terraform builds infrastructure as efficiently as possible, and operators get insight into dependencies in their infrastructure.
+
+- **Change Automation**: Complex changesets can be applied to your infrastructure with minimal human interaction. With the previously mentioned execution plan and resource graph, you know exactly what Terraform will change and in what order, avoiding many possible human errors.
+
+For more information, see the [introduction section](http://www.terraform.io/intro) of the Terraform website.
+
+Getting Started & Documentation
+-------------------------------
+
+All documentation is available on the [Terraform website](http://www.terraform.io).
+
+Developing Terraform
+--------------------
+
+If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.8+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.
+
+For local development, first make sure Go is properly installed, including setting up a [GOPATH](http://golang.org/doc/code.html#GOPATH). You will also need to add `$GOPATH/bin` to your `$PATH`.
+
+Next, using [Git](https://git-scm.com/), clone this repository into `$GOPATH/src/github.com/hashicorp/terraform`. All the necessary dependencies are either vendored or automatically installed, so you just need to type `make`. This will compile the code and then run the tests. If this exits with exit status 0, then everything is working!
+
+```sh
+$ cd "$GOPATH/src/github.com/hashicorp/terraform"
+$ make
+```
+
+To compile a development version of Terraform and the built-in plugins, run `make dev`. This will build everything using [gox](https://github.com/mitchellh/gox) and put Terraform binaries in the `bin` and `$GOPATH/bin` folders:
+
+```sh
+$ make dev
+...
+$ bin/terraform
+...
+```
+
+If you're developing a specific package, you can run tests for just that package by specifying the `TEST` variable. In the example below, only the `terraform` package tests will be run.
+
+```sh
+$ make test TEST=./terraform
+...
+```
+
+If you're working on a specific provider and only wish to rebuild that provider, you can use the `plugin-dev` target. For example, to build only the Azure provider:
+
+```sh
+$ make plugin-dev PLUGIN=provider-azure
+```
+
+If you're working on the core of Terraform and only wish to rebuild it without rebuilding providers, you can use the `core-dev` target. Note that some types of changes, such as work on the RPC interface, require both core and providers to be rebuilt. To build just the core of Terraform:
+
+```sh
+$ make core-dev
+```
+
+### Dependencies
+
+Terraform stores its dependencies under `vendor/`, which [Go 1.6+ will automatically recognize and load](https://golang.org/cmd/go/#hdr-Vendor_Directories). We use [`govendor`](https://github.com/kardianos/govendor) to manage the vendored dependencies.
+
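+As a quick sanity check (a sketch, assuming `govendor` is installed via `go get -u github.com/kardianos/govendor`), you can list the vendored packages and confirm they match `vendor/vendor.json`:
+
+```bash
+$ govendor status
+```
+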
+If you're developing Terraform, there are a few tasks you might need to perform.
+
+#### Adding a dependency
+
+If you're adding a dependency, you'll need to vendor it in the same Pull Request as the code that depends on it. You should do this in a separate commit from your code, as it makes PR review easier and the Git history simpler to read in the future.
+
+To add a dependency, assuming your work is on a branch called `my-feature-branch`:
+
+1. Add the new package to your GOPATH:
+
+ ```bash
+ go get github.com/hashicorp/my-project
+ ```
+
+2. Add the new package to your `vendor/` directory:
+
+ ```bash
+ govendor add github.com/hashicorp/my-project/package
+ ```
+
+3. Review the changes in git and commit them.
+
+#### Updating a dependency
+
+To update a dependency:
+
+1. Fetch the dependency:
+
+ ```bash
+ govendor fetch github.com/hashicorp/my-project
+ ```
+
+2. Review the changes in git and commit them.
+
+### Acceptance Tests
+
+Terraform has a comprehensive [acceptance
+test](http://en.wikipedia.org/wiki/Acceptance_testing) suite covering the
+built-in providers. Our [Contributing Guide](https://github.com/hashicorp/terraform/blob/master/.github/CONTRIBUTING.md) includes details about how and when to write and run acceptance tests in order to help contributions get accepted quickly.
+
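+A minimal sketch of running acceptance tests for a single provider (assuming the repository's `testacc` make target and the `TESTARGS` variable, both described in the Contributing Guide; the test-name filter here is illustrative):
+
+```sh
+$ make testacc TEST=./builtin/providers/ignition TESTARGS='-run=TestIgnition'
+```
+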
+
+### Cross Compilation and Building for Distribution
+
+If you wish to cross-compile Terraform for another architecture, you can set the `XC_OS` and `XC_ARCH` environment variables to values representing the target operating system and architecture before calling `make`. The output is placed in the `pkg` subdirectory tree both expanded in a directory representing the OS/architecture combination and as a ZIP archive.
+
+For example, to compile 64-bit Linux binaries on Mac OS X, you can run:
+
+```sh
+$ XC_OS=linux XC_ARCH=amd64 make bin
+...
+$ file pkg/linux_amd64/terraform
+terraform: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), statically linked, not stripped
+```
+
+`XC_OS` and `XC_ARCH` can be space separated lists representing different combinations of operating system and architecture. For example, to compile for both Linux and Mac OS X, targeting both 32- and 64-bit architectures, you can run:
+
+```sh
+$ XC_OS="linux darwin" XC_ARCH="386 amd64" make bin
+...
+$ tree ./pkg/ -P "terraform|*.zip"
+./pkg/
+├── darwin_386
+│   └── terraform
+├── darwin_386.zip
+├── darwin_amd64
+│   └── terraform
+├── darwin_amd64.zip
+├── linux_386
+│   └── terraform
+├── linux_386.zip
+├── linux_amd64
+│   └── terraform
+└── linux_amd64.zip
+
+4 directories, 8 files
+```
+
+_Note: Cross-compilation uses [gox](https://github.com/mitchellh/gox), which requires toolchains to be built with versions of Go prior to 1.5. In order to successfully cross-compile with older versions of Go, you will need to run `gox -build-toolchain` before running the commands detailed above._
+
+#### Docker
+
+When using Docker you don't need any of the Go development tools installed, and you can clone Terraform to any location on disk (it doesn't have to be inside your `$GOPATH`). This is useful for users who want to build `master` or a specific branch for testing without setting up a proper Go environment.
+
+For example, run the following command to build Terraform in a Linux-based container for macOS:
+
+```sh
+docker run --rm -v $(pwd):/go/src/github.com/hashicorp/terraform -w /go/src/github.com/hashicorp/terraform -e XC_OS=darwin -e XC_ARCH=amd64 golang:latest bash -c "apt-get update && apt-get install -y zip && make bin"
+```
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/provider.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/provider.go
new file mode 100644
index 00000000..81462e36
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/provider.go
@@ -0,0 +1,239 @@
+package ignition
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "sync"
+
+ "github.com/coreos/go-systemd/unit"
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// globalCache keeps the instances of the internal ignition types generated
+// by the different data resources so that they can be reused by the
+// ignition_config data resource. The map keys are hashes of the types,
+// calculated over each type serialized to JSON.
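+// For example, addDisk stores a *types.Disk under the hash of its JSON
+// encoding, and the ignition_config data source later resolves that same id
+// back to the disk when rendering the final config.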
+var globalCache = &cache{
+	disks:         make(map[string]*types.Disk),
+	arrays:        make(map[string]*types.Raid),
+	filesystems:   make(map[string]*types.Filesystem),
+	files:         make(map[string]*types.File),
+	systemdUnits:  make(map[string]*types.SystemdUnit),
+	networkdUnits: make(map[string]*types.NetworkdUnit),
+	users:         make(map[string]*types.User),
+	groups:        make(map[string]*types.Group),
+}
+
+func Provider() terraform.ResourceProvider {
+ return &schema.Provider{
+ DataSourcesMap: map[string]*schema.Resource{
+ "ignition_config": resourceConfig(),
+ "ignition_disk": resourceDisk(),
+ "ignition_raid": resourceRaid(),
+ "ignition_filesystem": resourceFilesystem(),
+ "ignition_file": resourceFile(),
+ "ignition_systemd_unit": resourceSystemdUnit(),
+ "ignition_networkd_unit": resourceNetworkdUnit(),
+ "ignition_user": resourceUser(),
+ "ignition_group": resourceGroup(),
+ },
+ ResourcesMap: map[string]*schema.Resource{
+ "ignition_config": schema.DataSourceResourceShim(
+ "ignition_config",
+ resourceConfig(),
+ ),
+ "ignition_disk": schema.DataSourceResourceShim(
+ "ignition_disk",
+ resourceDisk(),
+ ),
+ "ignition_raid": schema.DataSourceResourceShim(
+ "ignition_raid",
+ resourceRaid(),
+ ),
+ "ignition_filesystem": schema.DataSourceResourceShim(
+ "ignition_filesystem",
+ resourceFilesystem(),
+ ),
+ "ignition_file": schema.DataSourceResourceShim(
+ "ignition_file",
+ resourceFile(),
+ ),
+ "ignition_systemd_unit": schema.DataSourceResourceShim(
+ "ignition_systemd_unit",
+ resourceSystemdUnit(),
+ ),
+ "ignition_networkd_unit": schema.DataSourceResourceShim(
+ "ignition_networkd_unit",
+ resourceNetworkdUnit(),
+ ),
+ "ignition_user": schema.DataSourceResourceShim(
+ "ignition_user",
+ resourceUser(),
+ ),
+ "ignition_group": schema.DataSourceResourceShim(
+ "ignition_group",
+ resourceGroup(),
+ ),
+ },
+ }
+}
+
+type cache struct {
+ disks map[string]*types.Disk
+ arrays map[string]*types.Raid
+ filesystems map[string]*types.Filesystem
+ files map[string]*types.File
+ systemdUnits map[string]*types.SystemdUnit
+ networkdUnits map[string]*types.NetworkdUnit
+ users map[string]*types.User
+ groups map[string]*types.Group
+
+ sync.Mutex
+}
+
+func (c *cache) addDisk(g *types.Disk) string {
+ c.Lock()
+ defer c.Unlock()
+
+ id := id(g)
+ c.disks[id] = g
+
+ return id
+}
+
+func (c *cache) addRaid(r *types.Raid) string {
+ c.Lock()
+ defer c.Unlock()
+
+ id := id(r)
+ c.arrays[id] = r
+
+ return id
+}
+
+func (c *cache) addFilesystem(f *types.Filesystem) string {
+ c.Lock()
+ defer c.Unlock()
+
+ id := id(f)
+ c.filesystems[id] = f
+
+ return id
+}
+
+func (c *cache) addFile(f *types.File) string {
+ c.Lock()
+ defer c.Unlock()
+
+ id := id(f)
+ c.files[id] = f
+
+ return id
+}
+
+func (c *cache) addSystemdUnit(u *types.SystemdUnit) string {
+ c.Lock()
+ defer c.Unlock()
+
+ id := id(u)
+ c.systemdUnits[id] = u
+
+ return id
+}
+
+func (c *cache) addNetworkdUnit(u *types.NetworkdUnit) string {
+ c.Lock()
+ defer c.Unlock()
+
+ id := id(u)
+ c.networkdUnits[id] = u
+
+ return id
+}
+
+func (c *cache) addUser(u *types.User) string {
+ c.Lock()
+ defer c.Unlock()
+
+ id := id(u)
+ c.users[id] = u
+
+ return id
+}
+
+func (c *cache) addGroup(g *types.Group) string {
+ c.Lock()
+ defer c.Unlock()
+
+ id := id(g)
+ c.groups[id] = g
+
+ return id
+}
+
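+// id derives a stable identifier for a value by hashing its JSON encoding.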
+func id(input interface{}) string {
+ b, _ := json.Marshal(input)
+ return hash(string(b))
+}
+
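+// hash returns the hex-encoded SHA-256 digest of s.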
+func hash(s string) string {
+ sha := sha256.Sum256([]byte(s))
+ return hex.EncodeToString(sha[:])
+}
+
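+// castSliceInterface converts a []interface{} of strings to a []string.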
+func castSliceInterface(i []interface{}) []string {
+ var o []string
+ for _, value := range i {
+ o = append(o, value.(string))
+ }
+
+ return o
+}
+
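+// getUInt reads an optional int attribute and returns it as a *uint, or nil
+// when the attribute is unset.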
+func getUInt(d *schema.ResourceData, key string) *uint {
+ var uid *uint
+ if value, ok := d.GetOk(key); ok {
+ u := uint(value.(int))
+ uid = &u
+ }
+
+ return uid
+}
+
+var errEmptyUnit = fmt.Errorf("invalid or empty unit content")
+
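+// validateUnitContent checks that content deserializes as a systemd unit and
+// is not empty.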
+func validateUnitContent(content string) error {
+ c := bytes.NewBufferString(content)
+ unit, err := unit.Deserialize(c)
+ if err != nil {
+ return fmt.Errorf("invalid unit content: %s", err)
+ }
+
+ if len(unit) == 0 {
+ return errEmptyUnit
+ }
+
+ return nil
+}
+
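+// buildURL parses raw into an ignition types.Url.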
+func buildURL(raw string) (types.Url, error) {
+ u, err := url.Parse(raw)
+ if err != nil {
+ return types.Url{}, err
+ }
+
+ return types.Url(*u), nil
+}
+
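+// buildHash decodes a raw verification string into a types.Hash via its JSON
+// unmarshaller.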
+func buildHash(raw string) (types.Hash, error) {
+ h := types.Hash{}
+ err := h.UnmarshalJSON([]byte(fmt.Sprintf("%q", raw)))
+
+ return h, err
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_config.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_config.go
new file mode 100644
index 00000000..c75e50af
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_config.go
@@ -0,0 +1,308 @@
+package ignition
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/hashicorp/terraform/helper/schema"
+
+ "github.com/coreos/ignition/config/types"
+)
+
+var configReferenceResource = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "source": &schema.Schema{
+ Type: schema.TypeString,
+ ForceNew: true,
+ Required: true,
+ },
+ "verification": &schema.Schema{
+ Type: schema.TypeString,
+ ForceNew: true,
+ Optional: true,
+ },
+ },
+}
+
+func resourceConfig() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceIgnitionFileExists,
+ Read: resourceIgnitionFileRead,
+ Schema: map[string]*schema.Schema{
+ "disks": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "arrays": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "filesystems": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "files": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "systemd": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "networkd": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "users": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "groups": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "replace": &schema.Schema{
+ Type: schema.TypeList,
+ ForceNew: true,
+ Optional: true,
+ MaxItems: 1,
+ Elem: configReferenceResource,
+ },
+ "append": &schema.Schema{
+ Type: schema.TypeList,
+ ForceNew: true,
+ Optional: true,
+ Elem: configReferenceResource,
+ },
+ "rendered": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ }
+}
+
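+// resourceIgnitionFileRead renders the full config, exports it through the
+// "rendered" attribute, and uses the hash of the rendered JSON as the
+// resource id.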
+func resourceIgnitionFileRead(d *schema.ResourceData, meta interface{}) error {
+ rendered, err := renderConfig(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ if err := d.Set("rendered", rendered); err != nil {
+ return err
+ }
+
+ d.SetId(hash(rendered))
+ return nil
+}
+
+func resourceIgnitionFileExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ rendered, err := renderConfig(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return hash(rendered) == d.Id(), nil
+}
+
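+// renderConfig builds the config and serializes it as indented JSON.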
+func renderConfig(d *schema.ResourceData, c *cache) (string, error) {
+ i, err := buildConfig(d, c)
+ if err != nil {
+ return "", err
+ }
+
+ bytes, err := json.MarshalIndent(i, " ", " ")
+
+ if err != nil {
+ return "", err
+ }
+
+ return string(bytes), nil
+}
+
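+// buildConfig assembles the complete ignition config from the resource data,
+// resolving cached ids for the storage, systemd, networkd, and passwd
+// sections.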
+func buildConfig(d *schema.ResourceData, c *cache) (*types.Config, error) {
+ var err error
+ config := &types.Config{}
+ config.Ignition, err = buildIgnition(d)
+ if err != nil {
+ return nil, err
+ }
+
+ config.Storage, err = buildStorage(d, c)
+ if err != nil {
+ return nil, err
+ }
+
+ config.Systemd, err = buildSystemd(d, c)
+ if err != nil {
+ return nil, err
+ }
+
+ config.Networkd, err = buildNetworkd(d, c)
+ if err != nil {
+ return nil, err
+ }
+
+ config.Passwd, err = buildPasswd(d, c)
+ if err != nil {
+ return nil, err
+ }
+
+ return config, nil
+}
+
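+// buildIgnition sets the config version and resolves any replace/append
+// config references.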
+func buildIgnition(d *schema.ResourceData) (types.Ignition, error) {
+ var err error
+
+ i := types.Ignition{}
+ i.Version.UnmarshalJSON([]byte(`"2.0.0"`))
+
+ rr := d.Get("replace.0").(map[string]interface{})
+ if len(rr) != 0 {
+ i.Config.Replace, err = buildConfigReference(rr)
+ if err != nil {
+ return i, err
+ }
+ }
+
+ ar := d.Get("append").([]interface{})
+ if len(ar) != 0 {
+ for _, rr := range ar {
+ r, err := buildConfigReference(rr.(map[string]interface{}))
+ if err != nil {
+ return i, err
+ }
+
+ i.Config.Append = append(i.Config.Append, *r)
+ }
+ }
+
+ return i, nil
+}
+
+func buildConfigReference(raw map[string]interface{}) (*types.ConfigReference, error) {
+ r := &types.ConfigReference{}
+
+ src, err := buildURL(raw["source"].(string))
+ if err != nil {
+ return nil, err
+ }
+
+ r.Source = src
+
+ hash, err := buildHash(raw["verification"].(string))
+ if err != nil {
+ return nil, err
+ }
+
+ r.Verification.Hash = &hash
+
+ return r, nil
+}
+
+func buildStorage(d *schema.ResourceData, c *cache) (types.Storage, error) {
+ storage := types.Storage{}
+
+ for _, id := range d.Get("disks").([]interface{}) {
+ d, ok := c.disks[id.(string)]
+ if !ok {
+ return storage, fmt.Errorf("invalid disk %q, unknown disk id", id)
+ }
+
+ storage.Disks = append(storage.Disks, *d)
+ }
+
+ for _, id := range d.Get("arrays").([]interface{}) {
+ a, ok := c.arrays[id.(string)]
+ if !ok {
+ return storage, fmt.Errorf("invalid raid %q, unknown raid id", id)
+ }
+
+ storage.Arrays = append(storage.Arrays, *a)
+ }
+
+ for _, id := range d.Get("filesystems").([]interface{}) {
+ f, ok := c.filesystems[id.(string)]
+ if !ok {
+ return storage, fmt.Errorf("invalid filesystem %q, unknown filesystem id", id)
+ }
+
+ storage.Filesystems = append(storage.Filesystems, *f)
+ }
+
+ for _, id := range d.Get("files").([]interface{}) {
+ f, ok := c.files[id.(string)]
+ if !ok {
+ return storage, fmt.Errorf("invalid file %q, unknown file id", id)
+ }
+
+ storage.Files = append(storage.Files, *f)
+ }
+
+	return storage, nil
+}
+
+func buildSystemd(d *schema.ResourceData, c *cache) (types.Systemd, error) {
+ systemd := types.Systemd{}
+
+ for _, id := range d.Get("systemd").([]interface{}) {
+ u, ok := c.systemdUnits[id.(string)]
+ if !ok {
+ return systemd, fmt.Errorf("invalid systemd unit %q, unknown systemd unit id", id)
+ }
+
+ systemd.Units = append(systemd.Units, *u)
+ }
+
+	return systemd, nil
+}
+
+func buildNetworkd(d *schema.ResourceData, c *cache) (types.Networkd, error) {
+ networkd := types.Networkd{}
+
+ for _, id := range d.Get("networkd").([]interface{}) {
+ u, ok := c.networkdUnits[id.(string)]
+ if !ok {
+ return networkd, fmt.Errorf("invalid networkd unit %q, unknown networkd unit id", id)
+ }
+
+ networkd.Units = append(networkd.Units, *u)
+ }
+
+ return networkd, nil
+}
+
+func buildPasswd(d *schema.ResourceData, c *cache) (types.Passwd, error) {
+ passwd := types.Passwd{}
+
+ for _, id := range d.Get("users").([]interface{}) {
+ u, ok := c.users[id.(string)]
+ if !ok {
+ return passwd, fmt.Errorf("invalid user %q, unknown user id", id)
+ }
+
+ passwd.Users = append(passwd.Users, *u)
+ }
+
+ for _, id := range d.Get("groups").([]interface{}) {
+ g, ok := c.groups[id.(string)]
+ if !ok {
+ return passwd, fmt.Errorf("invalid group %q, unknown group id", id)
+ }
+
+ passwd.Groups = append(passwd.Groups, *g)
+ }
+
+	return passwd, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_disk.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_disk.go
new file mode 100644
index 00000000..8ef6c7e0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_disk.go
@@ -0,0 +1,99 @@
+package ignition
+
+import (
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceDisk() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceDiskExists,
+ Read: resourceDiskRead,
+ Schema: map[string]*schema.Schema{
+ "device": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "wipe_table": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+ "partition": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "label": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "number": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "size": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "start": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "type_guid": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func resourceDiskRead(d *schema.ResourceData, meta interface{}) error {
+ id, err := buildDisk(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(id)
+ return nil
+}
+
+func resourceDiskExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ id, err := buildDisk(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return id == d.Id(), nil
+}
+
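+// buildDisk translates the resource data into a types.Disk, caches it, and
+// returns its id.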
+func buildDisk(d *schema.ResourceData, c *cache) (string, error) {
+ var partitions []types.Partition
+ for _, raw := range d.Get("partition").([]interface{}) {
+ v := raw.(map[string]interface{})
+
+ partitions = append(partitions, types.Partition{
+ Label: types.PartitionLabel(v["label"].(string)),
+ Number: v["number"].(int),
+ Size: types.PartitionDimension(v["size"].(int)),
+ Start: types.PartitionDimension(v["start"].(int)),
+ TypeGUID: types.PartitionTypeGUID(v["type_guid"].(string)),
+ })
+ }
+
+ return c.addDisk(&types.Disk{
+ Device: types.Path(d.Get("device").(string)),
+ WipeTable: d.Get("wipe_table").(bool),
+ Partitions: partitions,
+ }), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_file.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_file.go
new file mode 100644
index 00000000..0f73ea6e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_file.go
@@ -0,0 +1,178 @@
+package ignition
+
+import (
+ "encoding/base64"
+ "fmt"
+
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceFile() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceFileExists,
+ Read: resourceFileRead,
+ Schema: map[string]*schema.Schema{
+ "filesystem": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "path": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "content": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "mime": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Default: "text/plain",
+ },
+
+ "content": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ },
+ },
+ },
+ "source": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "source": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "compression": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "verification": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ },
+ },
+ "mode": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "uid": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "gid": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceFileRead(d *schema.ResourceData, meta interface{}) error {
+ id, err := buildFile(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(id)
+ return nil
+}
+
+func resourceFileExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ id, err := buildFile(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return id == d.Id(), nil
+}
+
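+// buildFile requires exactly one of content (inlined as a data URL) or
+// source (a remote URL), then caches the resulting types.File and returns
+// its id.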
+func buildFile(d *schema.ResourceData, c *cache) (string, error) {
+ _, hasContent := d.GetOk("content")
+ _, hasSource := d.GetOk("source")
+ if hasContent && hasSource {
+ return "", fmt.Errorf("content and source options are incompatible")
+ }
+
+ if !hasContent && !hasSource {
+ return "", fmt.Errorf("content or source options must be present")
+ }
+
+ var compression types.Compression
+ var source types.Url
+ var hash *types.Hash
+ var err error
+
+ if hasContent {
+ source, err = encodeDataURL(
+ d.Get("content.0.mime").(string),
+ d.Get("content.0.content").(string),
+ )
+
+ if err != nil {
+ return "", err
+ }
+ }
+
+ if hasSource {
+ source, err = buildURL(d.Get("source.0.source").(string))
+ if err != nil {
+ return "", err
+ }
+
+ compression = types.Compression(d.Get("source.0.compression").(string))
+ h, err := buildHash(d.Get("source.0.verification").(string))
+ if err != nil {
+ return "", err
+ }
+
+ hash = &h
+ }
+
+ return c.addFile(&types.File{
+ Filesystem: d.Get("filesystem").(string),
+ Path: types.Path(d.Get("path").(string)),
+ Contents: types.FileContents{
+ Compression: compression,
+ Source: source,
+ Verification: types.Verification{
+ Hash: hash,
+ },
+ },
+ User: types.FileUser{
+ Id: d.Get("uid").(int),
+ },
+ Group: types.FileGroup{
+ Id: d.Get("gid").(int),
+ },
+ Mode: types.FileMode(d.Get("mode").(int)),
+ }), nil
+}
+
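+// encodeDataURL wraps content in a base64-encoded data: URL (RFC 2397) with
+// the given MIME type.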
+func encodeDataURL(mime, content string) (types.Url, error) {
+ base64 := base64.StdEncoding.EncodeToString([]byte(content))
+ return buildURL(
+ fmt.Sprintf("data:%s;charset=utf-8;base64,%s", mime, base64),
+ )
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_filesystem.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_filesystem.go
new file mode 100644
index 00000000..ce858e80
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_filesystem.go
@@ -0,0 +1,122 @@
+package ignition
+
+import (
+ "fmt"
+
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceFilesystem() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceFilesystemExists,
+ Read: resourceFilesystemRead,
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "mount": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "device": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "format": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "create": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+ "force": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+ "options": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ },
+ },
+ },
+ "path": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceFilesystemRead(d *schema.ResourceData, meta interface{}) error {
+ id, err := buildFilesystem(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(id)
+ return nil
+}
+
+func resourceFilesystemExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ id, err := buildFilesystem(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return id == d.Id(), nil
+}
+
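+// buildFilesystem builds a types.Filesystem from either a mount block or a
+// path (the two are mutually exclusive), caches it, and returns its id.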
+func buildFilesystem(d *schema.ResourceData, c *cache) (string, error) {
+ var mount *types.FilesystemMount
+ if _, ok := d.GetOk("mount"); ok {
+ mount = &types.FilesystemMount{
+ Device: types.Path(d.Get("mount.0.device").(string)),
+ Format: types.FilesystemFormat(d.Get("mount.0.format").(string)),
+ }
+
+ create, hasCreate := d.GetOk("mount.0.create")
+ force, hasForce := d.GetOk("mount.0.force")
+ options, hasOptions := d.GetOk("mount.0.options")
+ if hasCreate || hasOptions || hasForce {
+ mount.Create = &types.FilesystemCreate{
+ Force: force.(bool),
+ Options: castSliceInterface(options.([]interface{})),
+ }
+ }
+
+ if !create.(bool) && (hasForce || hasOptions) {
+ return "", fmt.Errorf("create should be true when force or options is used")
+ }
+ }
+
+ var path *types.Path
+ if p, ok := d.GetOk("path"); ok {
+ tp := types.Path(p.(string))
+ path = &tp
+ }
+
+ if mount != nil && path != nil {
+ return "", fmt.Errorf("mount and path are mutually exclusive")
+ }
+
+ return c.addFilesystem(&types.Filesystem{
+ Name: d.Get("name").(string),
+ Mount: mount,
+ Path: path,
+ }), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_group.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_group.go
new file mode 100644
index 00000000..125e97e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_group.go
@@ -0,0 +1,57 @@
+package ignition
+
+import (
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceGroup() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceGroupExists,
+ Read: resourceGroupRead,
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "gid": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "password_hash": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceGroupRead(d *schema.ResourceData, meta interface{}) error {
+ id, err := buildGroup(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(id)
+ return nil
+}
+
+func resourceGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ id, err := buildGroup(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return id == d.Id(), nil
+}
+
+func buildGroup(d *schema.ResourceData, c *cache) (string, error) {
+ return c.addGroup(&types.Group{
+ Name: d.Get("name").(string),
+ PasswordHash: d.Get("password_hash").(string),
+ Gid: getUInt(d, "gid"),
+ }), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_networkd_unit.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_networkd_unit.go
new file mode 100644
index 00000000..9fd40ed5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_networkd_unit.go
@@ -0,0 +1,60 @@
+package ignition
+
+import (
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceNetworkdUnit() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceNetworkdUnitExists,
+ Read: resourceNetworkdUnitRead,
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "content": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceNetworkdUnitRead(d *schema.ResourceData, meta interface{}) error {
+ id, err := buildNetworkdUnit(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(id)
+ return nil
+}
+
+func resourceNetworkdUnitDelete(d *schema.ResourceData, meta interface{}) error {
+ d.SetId("")
+ return nil
+}
+
+func resourceNetworkdUnitExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ id, err := buildNetworkdUnit(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return id == d.Id(), nil
+}
+
+func buildNetworkdUnit(d *schema.ResourceData, c *cache) (string, error) {
+ if err := validateUnitContent(d.Get("content").(string)); err != nil {
+ return "", err
+ }
+
+ return c.addNetworkdUnit(&types.NetworkdUnit{
+ Name: types.NetworkdUnitName(d.Get("name").(string)),
+ Contents: d.Get("content").(string),
+ }), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_raid.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_raid.go
new file mode 100644
index 00000000..dab1a5f7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_raid.go
@@ -0,0 +1,69 @@
+package ignition
+
+import (
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceRaid() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceRaidExists,
+ Read: resourceRaidRead,
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "level": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "devices": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "spares": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceRaidRead(d *schema.ResourceData, meta interface{}) error {
+ id, err := buildRaid(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(id)
+ return nil
+}
+
+func resourceRaidExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ id, err := buildRaid(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return id == d.Id(), nil
+}
+
+func buildRaid(d *schema.ResourceData, c *cache) (string, error) {
+ var devices []types.Path
+ for _, value := range d.Get("devices").([]interface{}) {
+ devices = append(devices, types.Path(value.(string)))
+ }
+
+ return c.addRaid(&types.Raid{
+ Name: d.Get("name").(string),
+ Level: d.Get("level").(string),
+ Devices: devices,
+ Spares: d.Get("spares").(int),
+ }), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_systemd_unit.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_systemd_unit.go
new file mode 100644
index 00000000..88fe9b20
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_systemd_unit.go
@@ -0,0 +1,104 @@
+package ignition
+
+import (
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceSystemdUnit() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceSystemdUnitExists,
+ Read: resourceSystemdUnitRead,
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "enable": {
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: true,
+ ForceNew: true,
+ },
+ "mask": {
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+ "content": {
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "dropin": {
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "content": {
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func resourceSystemdUnitRead(d *schema.ResourceData, meta interface{}) error {
+ id, err := buildSystemdUnit(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(id)
+ return nil
+}
+
+func resourceSystemdUnitExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ id, err := buildSystemdUnit(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return id == d.Id(), nil
+}
+
+func buildSystemdUnit(d *schema.ResourceData, c *cache) (string, error) {
+ var dropins []types.SystemdUnitDropIn
+ for _, raw := range d.Get("dropin").([]interface{}) {
+ value := raw.(map[string]interface{})
+
+ if err := validateUnitContent(value["content"].(string)); err != nil {
+ return "", err
+ }
+
+ dropins = append(dropins, types.SystemdUnitDropIn{
+ Name: types.SystemdUnitDropInName(value["name"].(string)),
+ Contents: value["content"].(string),
+ })
+ }
+
+ if err := validateUnitContent(d.Get("content").(string)); err != nil {
+ if err != errEmptyUnit {
+ return "", err
+ }
+ }
+
+ return c.addSystemdUnit(&types.SystemdUnit{
+ Name: types.SystemdUnitName(d.Get("name").(string)),
+ Contents: d.Get("content").(string),
+ Enable: d.Get("enable").(bool),
+ Mask: d.Get("mask").(bool),
+ DropIns: dropins,
+ }), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_user.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_user.go
new file mode 100644
index 00000000..183e6c8c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_user.go
@@ -0,0 +1,126 @@
+package ignition
+
+import (
+ "reflect"
+
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceUser() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceUserExists,
+ Read: resourceUserRead,
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "password_hash": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "ssh_authorized_keys": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "uid": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "gecos": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "home_dir": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "no_create_home": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+ "primary_group": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "groups": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "no_user_group": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+ "no_log_init": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+ "shell": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceUserRead(d *schema.ResourceData, meta interface{}) error {
+ id, err := buildUser(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(id)
+ return nil
+}
+
+func resourceUserExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ id, err := buildUser(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return id == d.Id(), nil
+}
+
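+// buildUser assembles a types.User; the UserCreate block is omitted entirely
+// when none of its fields are set.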
+func buildUser(d *schema.ResourceData, c *cache) (string, error) {
+ uc := types.UserCreate{
+ Uid: getUInt(d, "uid"),
+ GECOS: d.Get("gecos").(string),
+ Homedir: d.Get("home_dir").(string),
+ NoCreateHome: d.Get("no_create_home").(bool),
+ PrimaryGroup: d.Get("primary_group").(string),
+ Groups: castSliceInterface(d.Get("groups").([]interface{})),
+ NoUserGroup: d.Get("no_user_group").(bool),
+ NoLogInit: d.Get("no_log_init").(bool),
+ Shell: d.Get("shell").(string),
+ }
+
+ puc := &uc
+ if reflect.DeepEqual(uc, types.UserCreate{}) { // check if the struct is empty
+ puc = nil
+ }
+
+ user := types.User{
+ Name: d.Get("name").(string),
+ PasswordHash: d.Get("password_hash").(string),
+ SSHAuthorizedKeys: castSliceInterface(d.Get("ssh_authorized_keys").([]interface{})),
+ Create: puc,
+ }
+
+ return c.addUser(&user), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/append.go b/vendor/github.com/hashicorp/terraform/config/append.go
new file mode 100644
index 00000000..5f4e89ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/append.go
@@ -0,0 +1,86 @@
+package config
+
+// Append appends one configuration to another.
+//
+// Append assumes that both configurations will not have
+// conflicting variables, resources, etc. If they do, the
+// problems will be caught in the validation phase.
+//
+// It is possible that c1, c2 on their own are not valid. For
+// example, a resource in c2 may reference a variable in c1. But
+// together, they would be valid.
+func Append(c1, c2 *Config) (*Config, error) {
+ c := new(Config)
+
+ // Append unknown keys, but keep them unique since it is a set
+ unknowns := make(map[string]struct{})
+ for _, k := range c1.unknownKeys {
+ _, present := unknowns[k]
+ if !present {
+ unknowns[k] = struct{}{}
+ c.unknownKeys = append(c.unknownKeys, k)
+ }
+ }
+
+ for _, k := range c2.unknownKeys {
+ _, present := unknowns[k]
+ if !present {
+ unknowns[k] = struct{}{}
+ c.unknownKeys = append(c.unknownKeys, k)
+ }
+ }
+
+ c.Atlas = c1.Atlas
+ if c2.Atlas != nil {
+ c.Atlas = c2.Atlas
+ }
+
+ // merge Terraform blocks
+ if c1.Terraform != nil {
+ c.Terraform = c1.Terraform
+ if c2.Terraform != nil {
+ c.Terraform.Merge(c2.Terraform)
+ }
+ } else {
+ c.Terraform = c2.Terraform
+ }
+
+ if len(c1.Modules) > 0 || len(c2.Modules) > 0 {
+ c.Modules = make(
+ []*Module, 0, len(c1.Modules)+len(c2.Modules))
+ c.Modules = append(c.Modules, c1.Modules...)
+ c.Modules = append(c.Modules, c2.Modules...)
+ }
+
+ if len(c1.Outputs) > 0 || len(c2.Outputs) > 0 {
+ c.Outputs = make(
+ []*Output, 0, len(c1.Outputs)+len(c2.Outputs))
+ c.Outputs = append(c.Outputs, c1.Outputs...)
+ c.Outputs = append(c.Outputs, c2.Outputs...)
+ }
+
+ if len(c1.ProviderConfigs) > 0 || len(c2.ProviderConfigs) > 0 {
+ c.ProviderConfigs = make(
+ []*ProviderConfig,
+ 0, len(c1.ProviderConfigs)+len(c2.ProviderConfigs))
+ c.ProviderConfigs = append(c.ProviderConfigs, c1.ProviderConfigs...)
+ c.ProviderConfigs = append(c.ProviderConfigs, c2.ProviderConfigs...)
+ }
+
+ if len(c1.Resources) > 0 || len(c2.Resources) > 0 {
+ c.Resources = make(
+ []*Resource,
+ 0, len(c1.Resources)+len(c2.Resources))
+ c.Resources = append(c.Resources, c1.Resources...)
+ c.Resources = append(c.Resources, c2.Resources...)
+ }
+
+ if len(c1.Variables) > 0 || len(c2.Variables) > 0 {
+ c.Variables = make(
+ []*Variable, 0, len(c1.Variables)+len(c2.Variables))
+ c.Variables = append(c.Variables, c1.Variables...)
+ c.Variables = append(c.Variables, c2.Variables...)
+ }
+
+ return c, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go
new file mode 100644
index 00000000..9a764ace
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config.go
@@ -0,0 +1,1096 @@
+// Package config is responsible for loading and validating the
+// configuration.
+package config
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/hil"
+ "github.com/hashicorp/hil/ast"
+ "github.com/hashicorp/terraform/helper/hilmapstructure"
+ "github.com/mitchellh/reflectwalk"
+)
+
+// NameRegexp is the regular expression that all names (modules, providers,
+// resources, etc.) must follow.
+var NameRegexp = regexp.MustCompile(`(?i)\A[A-Z0-9_][A-Z0-9\-\_]*\z`)
+
+// Config is the configuration that comes from loading a collection
+// of Terraform templates.
+type Config struct {
+ // Dir is the path to the directory where this configuration was
+ // loaded from. If it is blank, this configuration wasn't loaded from
+ // any meaningful directory.
+ Dir string
+
+ Terraform *Terraform
+ Atlas *AtlasConfig
+ Modules []*Module
+ ProviderConfigs []*ProviderConfig
+ Resources []*Resource
+ Variables []*Variable
+ Outputs []*Output
+
+ // The fields below can be filled in by loaders for validation
+ // purposes.
+ unknownKeys []string
+}
+
+// AtlasConfig is the configuration for building in HashiCorp's Atlas.
+type AtlasConfig struct {
+ Name string
+ Include []string
+ Exclude []string
+}
+
+// Module is a module used within a configuration.
+//
+// This does not represent a module itself, this represents a module
+// call-site within an existing configuration.
+type Module struct {
+ Name string
+ Source string
+ RawConfig *RawConfig
+}
+
+// ProviderConfig is the configuration for a resource provider.
+//
+// For example, Terraform needs to set the AWS access keys for the AWS
+// resource provider.
+type ProviderConfig struct {
+ Name string
+ Alias string
+ RawConfig *RawConfig
+}
+
+// A Resource represents a single Terraform resource in the configuration.
+// A Terraform resource is something that supports some or all of the
+// usual "create, read, update, delete" operations, depending on
+// the given Mode.
+type Resource struct {
+ Mode ResourceMode // which operations the resource supports
+ Name string
+ Type string
+ RawCount *RawConfig
+ RawConfig *RawConfig
+ Provisioners []*Provisioner
+ Provider string
+ DependsOn []string
+ Lifecycle ResourceLifecycle
+}
+
+// Copy returns a copy of this Resource. Helpful for avoiding shared
+// config pointers across multiple pieces of the graph that need to do
+// interpolation.
+func (r *Resource) Copy() *Resource {
+ n := &Resource{
+ Mode: r.Mode,
+ Name: r.Name,
+ Type: r.Type,
+ RawCount: r.RawCount.Copy(),
+ RawConfig: r.RawConfig.Copy(),
+ Provisioners: make([]*Provisioner, 0, len(r.Provisioners)),
+ Provider: r.Provider,
+ DependsOn: make([]string, len(r.DependsOn)),
+ Lifecycle: *r.Lifecycle.Copy(),
+ }
+ for _, p := range r.Provisioners {
+ n.Provisioners = append(n.Provisioners, p.Copy())
+ }
+ copy(n.DependsOn, r.DependsOn)
+ return n
+}
+
+// ResourceLifecycle is used to store the lifecycle tuning parameters
+// to allow customized behavior
+type ResourceLifecycle struct {
+ CreateBeforeDestroy bool `mapstructure:"create_before_destroy"`
+ PreventDestroy bool `mapstructure:"prevent_destroy"`
+ IgnoreChanges []string `mapstructure:"ignore_changes"`
+}
+
+// Copy returns a copy of this ResourceLifecycle
+func (r *ResourceLifecycle) Copy() *ResourceLifecycle {
+ n := &ResourceLifecycle{
+ CreateBeforeDestroy: r.CreateBeforeDestroy,
+ PreventDestroy: r.PreventDestroy,
+ IgnoreChanges: make([]string, len(r.IgnoreChanges)),
+ }
+ copy(n.IgnoreChanges, r.IgnoreChanges)
+ return n
+}
+
+// Provisioner is a configured provisioner step on a resource.
+type Provisioner struct {
+ Type string
+ RawConfig *RawConfig
+ ConnInfo *RawConfig
+
+ When ProvisionerWhen
+ OnFailure ProvisionerOnFailure
+}
+
+// Copy returns a copy of this Provisioner
+func (p *Provisioner) Copy() *Provisioner {
+ return &Provisioner{
+ Type: p.Type,
+ RawConfig: p.RawConfig.Copy(),
+ ConnInfo: p.ConnInfo.Copy(),
+ When: p.When,
+ OnFailure: p.OnFailure,
+ }
+}
+
+// Variable is a variable defined within the configuration.
+type Variable struct {
+ Name string
+ DeclaredType string `mapstructure:"type"`
+ Default interface{}
+ Description string
+}
+
+// Output is an output defined within the configuration. An output is
+// data that Terraform highlights when an operation finishes. An
+// output marked Sensitive will be output in a masked form following
+// application, but will still be available in state.
+type Output struct {
+ Name string
+ DependsOn []string
+ Description string
+ Sensitive bool
+ RawConfig *RawConfig
+}
+
+// VariableType is the type of value a variable is holding, and is returned
+// by the Type() function on variables.
+type VariableType byte
+
+const (
+ VariableTypeUnknown VariableType = iota
+ VariableTypeString
+ VariableTypeList
+ VariableTypeMap
+)
+
+func (v VariableType) Printable() string {
+ switch v {
+ case VariableTypeString:
+ return "string"
+ case VariableTypeMap:
+ return "map"
+ case VariableTypeList:
+ return "list"
+ default:
+ return "unknown"
+ }
+}
+
+// ProviderConfigName returns the name of the provider configuration in
+// the given mapping that maps to the proper provider configuration
+// for this resource.
+func ProviderConfigName(t string, pcs []*ProviderConfig) string {
+ lk := ""
+ for _, v := range pcs {
+ k := v.Name
+ if strings.HasPrefix(t, k) && len(k) > len(lk) {
+ lk = k
+ }
+ }
+
+ return lk
+}
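+
+// To illustrate the longest-prefix rule above with hypothetical provider
+// names: given configs named "aws" and "awsx", a resource type of
+// "awsx_widget" resolves to "awsx", the longest configured name that
+// prefixes the type.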
+
+// Id returns a unique identifier for this module.
+func (r *Module) Id() string {
+ return r.Name
+}
+
+// Count returns the count of this resource.
+func (r *Resource) Count() (int, error) {
+ raw := r.RawCount.Value()
+ count, ok := raw.(string)
+ if !ok {
+ return 0, fmt.Errorf(
+ "expected count to be a string or int, got %T", raw)
+ }
+
+ v, err := strconv.ParseInt(count, 0, 0)
+ if err != nil {
+ return 0, err
+ }
+
+ return int(v), nil
+}
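+
+// Note that ParseInt above uses base 0, so the count string is read like
+// a Go integer literal: "3" yields 3, and a prefixed form such as "0x10"
+// would parse as 16.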
+
+// Id returns a unique identifier for this resource.
+func (r *Resource) Id() string {
+ switch r.Mode {
+ case ManagedResourceMode:
+ return fmt.Sprintf("%s.%s", r.Type, r.Name)
+ case DataResourceMode:
+ return fmt.Sprintf("data.%s.%s", r.Type, r.Name)
+ default:
+ panic(fmt.Errorf("unknown resource mode %s", r.Mode))
+ }
+}
+
+// Validate does some basic semantic checking of the configuration.
+func (c *Config) Validate() error {
+ if c == nil {
+ return nil
+ }
+
+ var errs []error
+
+ for _, k := range c.unknownKeys {
+ errs = append(errs, fmt.Errorf(
+ "Unknown root level key: %s", k))
+ }
+
+ // Validate the Terraform config
+ if tf := c.Terraform; tf != nil {
+ errs = append(errs, c.Terraform.Validate()...)
+ }
+
+ vars := c.InterpolatedVariables()
+ varMap := make(map[string]*Variable)
+ for _, v := range c.Variables {
+ if _, ok := varMap[v.Name]; ok {
+ errs = append(errs, fmt.Errorf(
+ "Variable '%s': duplicate found. Variable names must be unique.",
+ v.Name))
+ }
+
+ varMap[v.Name] = v
+ }
+
+ for k := range varMap {
+ if !NameRegexp.MatchString(k) {
+ errs = append(errs, fmt.Errorf(
+ "variable %q: variable name must match regular expresion %s",
+ k, NameRegexp))
+ }
+ }
+
+ for _, v := range c.Variables {
+ if v.Type() == VariableTypeUnknown {
+ errs = append(errs, fmt.Errorf(
+ "Variable '%s': must be a string or a map",
+ v.Name))
+ continue
+ }
+
+ interp := false
+ fn := func(n ast.Node) (interface{}, error) {
+ // LiteralNode is a literal string (outside of a ${ ... } sequence).
+ // interpolationWalker skips most of these, but in particular it
+ // visits those that have escaped sequences (like $${foo}) as a
+ // signal that *some* processing is required on this string. For
+ // our purposes here though, this is fine and not an interpolation.
+ if _, ok := n.(*ast.LiteralNode); !ok {
+ interp = true
+ }
+ return "", nil
+ }
+
+ w := &interpolationWalker{F: fn}
+ if v.Default != nil {
+ if err := reflectwalk.Walk(v.Default, w); err == nil {
+ if interp {
+ errs = append(errs, fmt.Errorf(
+ "Variable '%s': cannot contain interpolations",
+ v.Name))
+ }
+ }
+ }
+ }
+
+ // Check for references to user variables that do not actually
+ // exist and record those errors.
+ for source, vs := range vars {
+ for _, v := range vs {
+ uv, ok := v.(*UserVariable)
+ if !ok {
+ continue
+ }
+
+ if _, ok := varMap[uv.Name]; !ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: unknown variable referenced: '%s'. define it with 'variable' blocks",
+ source,
+ uv.Name))
+ }
+ }
+ }
+
+ // Check that all count variables are valid.
+ for source, vs := range vars {
+ for _, rawV := range vs {
+ switch v := rawV.(type) {
+ case *CountVariable:
+ if v.Type == CountValueInvalid {
+ errs = append(errs, fmt.Errorf(
+ "%s: invalid count variable: %s",
+ source,
+ v.FullKey()))
+ }
+ case *PathVariable:
+ if v.Type == PathValueInvalid {
+ errs = append(errs, fmt.Errorf(
+ "%s: invalid path variable: %s",
+ source,
+ v.FullKey()))
+ }
+ }
+ }
+ }
+
+ // Check that providers aren't declared multiple times.
+ providerSet := make(map[string]struct{})
+ for _, p := range c.ProviderConfigs {
+ name := p.FullName()
+ if _, ok := providerSet[name]; ok {
+ errs = append(errs, fmt.Errorf(
+ "provider.%s: declared multiple times, you can only declare a provider once",
+ name))
+ continue
+ }
+
+ providerSet[name] = struct{}{}
+ }
+
+ // Check that all references to modules are valid
+ modules := make(map[string]*Module)
+ dupped := make(map[string]struct{})
+ for _, m := range c.Modules {
+ // Check for duplicates
+ if _, ok := modules[m.Id()]; ok {
+ if _, ok := dupped[m.Id()]; !ok {
+ dupped[m.Id()] = struct{}{}
+
+ errs = append(errs, fmt.Errorf(
+ "%s: module repeated multiple times",
+ m.Id()))
+ }
+
+ // Already seen this module, just skip it
+ continue
+ }
+
+ modules[m.Id()] = m
+
+ // Check that the source has no interpolations
+ rc, err := NewRawConfig(map[string]interface{}{
+ "root": m.Source,
+ })
+ if err != nil {
+ errs = append(errs, fmt.Errorf(
+ "%s: module source error: %s",
+ m.Id(), err))
+ } else if len(rc.Interpolations) > 0 {
+ errs = append(errs, fmt.Errorf(
+ "%s: module source cannot contain interpolations",
+ m.Id()))
+ }
+
+ // Check that the name matches our regexp
+ if !NameRegexp.Match([]byte(m.Name)) {
+ errs = append(errs, fmt.Errorf(
+ "%s: module name can only contain letters, numbers, "+
+ "dashes, and underscores",
+ m.Id()))
+ }
+
+ // Check that the configuration can all be strings, lists or maps
+ raw := make(map[string]interface{})
+ for k, v := range m.RawConfig.Raw {
+ var strVal string
+ if err := hilmapstructure.WeakDecode(v, &strVal); err == nil {
+ raw[k] = strVal
+ continue
+ }
+
+ var mapVal map[string]interface{}
+ if err := hilmapstructure.WeakDecode(v, &mapVal); err == nil {
+ raw[k] = mapVal
+ continue
+ }
+
+ var sliceVal []interface{}
+ if err := hilmapstructure.WeakDecode(v, &sliceVal); err == nil {
+ raw[k] = sliceVal
+ continue
+ }
+
+ errs = append(errs, fmt.Errorf(
+ "%s: variable %s must be a string, list or map value",
+ m.Id(), k))
+ }
+
+ // Check for invalid count variables
+ for _, v := range m.RawConfig.Variables {
+ switch v.(type) {
+ case *CountVariable:
+ errs = append(errs, fmt.Errorf(
+ "%s: count variables are only valid within resources", m.Name))
+ case *SelfVariable:
+ errs = append(errs, fmt.Errorf(
+ "%s: self variables are only valid within resources", m.Name))
+ }
+ }
+
+ // Update the raw configuration to only contain the string values
+ m.RawConfig, err = NewRawConfig(raw)
+ if err != nil {
+ errs = append(errs, fmt.Errorf(
+ "%s: can't initialize configuration: %s",
+ m.Id(), err))
+ }
+ }
+ dupped = nil
+
+ // Check that all variables for modules reference modules that
+ // exist.
+ for source, vs := range vars {
+ for _, v := range vs {
+ mv, ok := v.(*ModuleVariable)
+ if !ok {
+ continue
+ }
+
+ if _, ok := modules[mv.Name]; !ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: unknown module referenced: %s",
+ source,
+ mv.Name))
+ }
+ }
+ }
+
+ // Check that all references to resources are valid
+ resources := make(map[string]*Resource)
+ dupped = make(map[string]struct{})
+ for _, r := range c.Resources {
+ if _, ok := resources[r.Id()]; ok {
+ if _, ok := dupped[r.Id()]; !ok {
+ dupped[r.Id()] = struct{}{}
+
+ errs = append(errs, fmt.Errorf(
+ "%s: resource repeated multiple times",
+ r.Id()))
+ }
+ }
+
+ resources[r.Id()] = r
+ }
+ dupped = nil
+
+ // Validate resources
+ for n, r := range resources {
+ // Verify count variables
+ for _, v := range r.RawCount.Variables {
+ switch v.(type) {
+ case *CountVariable:
+ errs = append(errs, fmt.Errorf(
+ "%s: resource count can't reference count variable: %s",
+ n,
+ v.FullKey()))
+ case *SimpleVariable:
+ errs = append(errs, fmt.Errorf(
+ "%s: resource count can't reference variable: %s",
+ n,
+ v.FullKey()))
+
+ // Good
+ case *ModuleVariable:
+ case *ResourceVariable:
+ case *TerraformVariable:
+ case *UserVariable:
+
+ default:
+ errs = append(errs, fmt.Errorf(
+ "Internal error. Unknown type in count var in %s: %T",
+ n, v))
+ }
+ }
+
+ // Interpolate with a fixed number to verify that it's a number.
+ r.RawCount.interpolate(func(root ast.Node) (interface{}, error) {
+ // Execute the node but transform the AST so that it returns
+ // a fixed value of "5" for all interpolations.
+ result, err := hil.Eval(
+ hil.FixedValueTransform(
+ root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}),
+ nil)
+ if err != nil {
+ return "", err
+ }
+
+ return result.Value, nil
+ })
+ _, err := strconv.ParseInt(r.RawCount.Value().(string), 0, 0)
+ if err != nil {
+ errs = append(errs, fmt.Errorf(
+ "%s: resource count must be an integer",
+ n))
+ }
+ r.RawCount.init()
+
+ // Validate DependsOn
+ errs = append(errs, c.validateDependsOn(n, r.DependsOn, resources, modules)...)
+
+ // Verify provisioners
+ for _, p := range r.Provisioners {
+ // This validation checks that there are no splat variables
+ // referencing the resource itself, which is currently not allowed.
+
+ for _, v := range p.ConnInfo.Variables {
+ rv, ok := v.(*ResourceVariable)
+ if !ok {
+ continue
+ }
+
+ if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name {
+ errs = append(errs, fmt.Errorf(
+ "%s: connection info cannot contain splat variable "+
+ "referencing itself", n))
+ break
+ }
+ }
+
+ for _, v := range p.RawConfig.Variables {
+ rv, ok := v.(*ResourceVariable)
+ if !ok {
+ continue
+ }
+
+ if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name {
+ errs = append(errs, fmt.Errorf(
+ "%s: connection info cannot contain splat variable "+
+ "referencing itself", n))
+ break
+ }
+ }
+
+ // Check for invalid when/onFailure values; though this should be
+ // picked up by the loader, we check here just in case.
+ if p.When == ProvisionerWhenInvalid {
+ errs = append(errs, fmt.Errorf(
+ "%s: provisioner 'when' value is invalid", n))
+ }
+ if p.OnFailure == ProvisionerOnFailureInvalid {
+ errs = append(errs, fmt.Errorf(
+ "%s: provisioner 'on_failure' value is invalid", n))
+ }
+ }
+
+ // Verify ignore_changes contains valid entries
+ for _, v := range r.Lifecycle.IgnoreChanges {
+ if strings.Contains(v, "*") && v != "*" {
+ errs = append(errs, fmt.Errorf(
+ "%s: ignore_changes does not support using a partial string "+
+ "together with a wildcard: %s", n, v))
+ }
+ }
+
+ // Verify ignore_changes has no interpolations
+ rc, err := NewRawConfig(map[string]interface{}{
+ "root": r.Lifecycle.IgnoreChanges,
+ })
+ if err != nil {
+ errs = append(errs, fmt.Errorf(
+ "%s: lifecycle ignore_changes error: %s",
+ n, err))
+ } else if len(rc.Interpolations) > 0 {
+ errs = append(errs, fmt.Errorf(
+ "%s: lifecycle ignore_changes cannot contain interpolations",
+ n))
+ }
+
+ // If it is a data source then it can't have provisioners
+ if r.Mode == DataResourceMode {
+ if _, ok := r.RawConfig.Raw["provisioner"]; ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: data sources cannot have provisioners",
+ n))
+ }
+ }
+ }
+
+ for source, vs := range vars {
+ for _, v := range vs {
+ rv, ok := v.(*ResourceVariable)
+ if !ok {
+ continue
+ }
+
+ id := rv.ResourceId()
+ if _, ok := resources[id]; !ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: unknown resource '%s' referenced in variable %s",
+ source,
+ id,
+ rv.FullKey()))
+ continue
+ }
+ }
+ }
+
+ // Check that all outputs are valid
+ {
+ found := make(map[string]struct{})
+ for _, o := range c.Outputs {
+ // Verify the output is new
+ if _, ok := found[o.Name]; ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: duplicate output. output names must be unique.",
+ o.Name))
+ continue
+ }
+ found[o.Name] = struct{}{}
+
+ var invalidKeys []string
+ valueKeyFound := false
+ for k := range o.RawConfig.Raw {
+ if k == "value" {
+ valueKeyFound = true
+ continue
+ }
+ if k == "sensitive" {
+ if sensitive, ok := o.RawConfig.config[k].(bool); ok {
+ if sensitive {
+ o.Sensitive = true
+ }
+ continue
+ }
+
+ errs = append(errs, fmt.Errorf(
+ "%s: value for 'sensitive' must be boolean",
+ o.Name))
+ continue
+ }
+ if k == "description" {
+ if desc, ok := o.RawConfig.config[k].(string); ok {
+ o.Description = desc
+ continue
+ }
+
+ errs = append(errs, fmt.Errorf(
+ "%s: value for 'description' must be string",
+ o.Name))
+ continue
+ }
+ invalidKeys = append(invalidKeys, k)
+ }
+ if len(invalidKeys) > 0 {
+ errs = append(errs, fmt.Errorf(
+ "%s: output has invalid keys: %s",
+ o.Name, strings.Join(invalidKeys, ", ")))
+ }
+ if !valueKeyFound {
+ errs = append(errs, fmt.Errorf(
+ "%s: output is missing required 'value' key", o.Name))
+ }
+
+ for _, v := range o.RawConfig.Variables {
+ if _, ok := v.(*CountVariable); ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: count variables are only valid within resources", o.Name))
+ }
+ }
+ }
+ }
+
+ // Check that all variables are in the proper context
+ for source, rc := range c.rawConfigs() {
+ walker := &interpolationWalker{
+ ContextF: c.validateVarContextFn(source, &errs),
+ }
+ if err := reflectwalk.Walk(rc.Raw, walker); err != nil {
+ errs = append(errs, fmt.Errorf(
+ "%s: error reading config: %s", source, err))
+ }
+ }
+
+ // Validate the self variable
+ for source, rc := range c.rawConfigs() {
+ // Ignore provisioners. This is a pretty brittle way to do this,
+ // but better than also repeating all the resources.
+ if strings.Contains(source, "provision") {
+ continue
+ }
+
+ for _, v := range rc.Variables {
+ if _, ok := v.(*SelfVariable); ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: cannot contain self-reference %s", source, v.FullKey()))
+ }
+ }
+ }
+
+ if len(errs) > 0 {
+ return &multierror.Error{Errors: errs}
+ }
+
+ return nil
+}
+
+// InterpolatedVariables is a helper that returns a mapping of all the interpolated
+// variables within the configuration. This is used to verify references
+// are valid in the Validate step.
+func (c *Config) InterpolatedVariables() map[string][]InterpolatedVariable {
+ result := make(map[string][]InterpolatedVariable)
+ for source, rc := range c.rawConfigs() {
+ for _, v := range rc.Variables {
+ result[source] = append(result[source], v)
+ }
+ }
+ return result
+}
+
+// rawConfigs returns all of the RawConfigs that are available keyed by
+// a human-friendly source.
+func (c *Config) rawConfigs() map[string]*RawConfig {
+ result := make(map[string]*RawConfig)
+ for _, m := range c.Modules {
+ source := fmt.Sprintf("module '%s'", m.Name)
+ result[source] = m.RawConfig
+ }
+
+ for _, pc := range c.ProviderConfigs {
+ source := fmt.Sprintf("provider config '%s'", pc.Name)
+ result[source] = pc.RawConfig
+ }
+
+ for _, rc := range c.Resources {
+ source := fmt.Sprintf("resource '%s'", rc.Id())
+ result[source+" count"] = rc.RawCount
+ result[source+" config"] = rc.RawConfig
+
+ for i, p := range rc.Provisioners {
+ subsource := fmt.Sprintf(
+ "%s provisioner %s (#%d)",
+ source, p.Type, i+1)
+ result[subsource] = p.RawConfig
+ }
+ }
+
+ for _, o := range c.Outputs {
+ source := fmt.Sprintf("output '%s'", o.Name)
+ result[source] = o.RawConfig
+ }
+
+ return result
+}
+
+func (c *Config) validateVarContextFn(
+ source string, errs *[]error) interpolationWalkerContextFunc {
+ return func(loc reflectwalk.Location, node ast.Node) {
+ // If we're in a slice element, then it's fine, since you can do
+ // anything in there.
+ if loc == reflectwalk.SliceElem {
+ return
+ }
+
+ // Otherwise, let's check if there is a splat resource variable
+ // at the top level in here. We do this by doing a transform that
+ // replaces everything with a noop node unless it's a variable
+ // access or concat. This should turn the AST into a flat tree
+ // of Concat(Noop, ...). If there are any variables left that are
+ // multi-access, then it's still broken.
+ node = node.Accept(func(n ast.Node) ast.Node {
+ // If it is a concat or variable access, we allow it.
+ switch n.(type) {
+ case *ast.Output:
+ return n
+ case *ast.VariableAccess:
+ return n
+ }
+
+ // Otherwise, noop
+ return &noopNode{}
+ })
+
+ vars, err := DetectVariables(node)
+ if err != nil {
+ // Ignore it since this will be caught during parse. This
+ // probably should never happen by the time this is called,
+ // but it's okay.
+ return
+ }
+
+ for _, v := range vars {
+ rv, ok := v.(*ResourceVariable)
+ if !ok {
+ return
+ }
+
+ if rv.Multi && rv.Index == -1 {
+ *errs = append(*errs, fmt.Errorf(
+ "%s: use of the splat ('*') operator must be wrapped in a list declaration",
+ source))
+ }
+ }
+ }
+}
+
+func (c *Config) validateDependsOn(
+ n string,
+ v []string,
+ resources map[string]*Resource,
+ modules map[string]*Module) []error {
+ // Verify that depends_on points only to resources and modules that exist
+ var errs []error
+ for _, d := range v {
+ // Check if we contain interpolations
+ rc, err := NewRawConfig(map[string]interface{}{
+ "value": d,
+ })
+ if err == nil && len(rc.Variables) > 0 {
+ errs = append(errs, fmt.Errorf(
+ "%s: depends on value cannot contain interpolations: %s",
+ n, d))
+ continue
+ }
+
+ // If it is a module, verify it is a module
+ if strings.HasPrefix(d, "module.") {
+ name := d[len("module."):]
+ if _, ok := modules[name]; !ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: resource depends on non-existent module '%s'",
+ n, name))
+ }
+
+ continue
+ }
+
+ // Check resources
+ if _, ok := resources[d]; !ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: resource depends on non-existent resource '%s'",
+ n, d))
+ }
+ }
+
+ return errs
+}
+
+func (m *Module) mergerName() string {
+ return m.Id()
+}
+
+func (m *Module) mergerMerge(other merger) merger {
+ m2 := other.(*Module)
+
+ result := *m
+ result.Name = m2.Name
+ result.RawConfig = result.RawConfig.merge(m2.RawConfig)
+
+ if m2.Source != "" {
+ result.Source = m2.Source
+ }
+
+ return &result
+}
+
+func (o *Output) mergerName() string {
+ return o.Name
+}
+
+func (o *Output) mergerMerge(m merger) merger {
+ o2 := m.(*Output)
+
+ result := *o
+ result.Name = o2.Name
+ result.Description = o2.Description
+ result.RawConfig = result.RawConfig.merge(o2.RawConfig)
+ result.Sensitive = o2.Sensitive
+ result.DependsOn = o2.DependsOn
+
+ return &result
+}
+
+func (c *ProviderConfig) GoString() string {
+ return fmt.Sprintf("*%#v", *c)
+}
+
+func (c *ProviderConfig) FullName() string {
+ if c.Alias == "" {
+ return c.Name
+ }
+
+ return fmt.Sprintf("%s.%s", c.Name, c.Alias)
+}
+
+func (c *ProviderConfig) mergerName() string {
+ return c.Name
+}
+
+func (c *ProviderConfig) mergerMerge(m merger) merger {
+ c2 := m.(*ProviderConfig)
+
+ result := *c
+ result.Name = c2.Name
+ result.RawConfig = result.RawConfig.merge(c2.RawConfig)
+
+ if c2.Alias != "" {
+ result.Alias = c2.Alias
+ }
+
+ return &result
+}
+
+func (r *Resource) mergerName() string {
+ return r.Id()
+}
+
+func (r *Resource) mergerMerge(m merger) merger {
+ r2 := m.(*Resource)
+
+ result := *r
+ result.Mode = r2.Mode
+ result.Name = r2.Name
+ result.Type = r2.Type
+ result.RawConfig = result.RawConfig.merge(r2.RawConfig)
+
+ if r2.RawCount.Value() != "1" {
+ result.RawCount = r2.RawCount
+ }
+
+ if len(r2.Provisioners) > 0 {
+ result.Provisioners = r2.Provisioners
+ }
+
+ return &result
+}
+
+// Merge merges two variables to create a new third variable.
+func (v *Variable) Merge(v2 *Variable) *Variable {
+ // Shallow copy the variable
+ result := *v
+
+ // The names should be the same, but the second name always wins.
+ result.Name = v2.Name
+
+ if v2.DeclaredType != "" {
+ result.DeclaredType = v2.DeclaredType
+ }
+ if v2.Default != nil {
+ result.Default = v2.Default
+ }
+ if v2.Description != "" {
+ result.Description = v2.Description
+ }
+
+ return &result
+}
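+
+// A small sketch of the merge semantics above (hypothetical variables):
+//
+//	a := &Variable{Name: "ami", Default: "ami-123"}
+//	b := &Variable{Name: "ami", Description: "AMI to use"}
+//	m := a.Merge(b)
+//	// m.Default == "ami-123" (kept from a), m.Description == "AMI to use"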
+
+var typeStringMap = map[string]VariableType{
+ "string": VariableTypeString,
+ "map": VariableTypeMap,
+ "list": VariableTypeList,
+}
+
+// Type returns the type of variable this is.
+func (v *Variable) Type() VariableType {
+ if v.DeclaredType != "" {
+ declaredType, ok := typeStringMap[v.DeclaredType]
+ if !ok {
+ return VariableTypeUnknown
+ }
+
+ return declaredType
+ }
+
+ return v.inferTypeFromDefault()
+}
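+
+// For example, a DeclaredType of "list" yields VariableTypeList, while an
+// unrecognized DeclaredType yields VariableTypeUnknown, which Validate
+// reports as an error.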
+
+// ValidateTypeAndDefault ensures that default variable value is compatible
+// with the declared type (if one exists), and that the type is one which is
+// known to Terraform
+func (v *Variable) ValidateTypeAndDefault() error {
+ // If an explicit type is declared, ensure it is valid
+ if v.DeclaredType != "" {
+ if _, ok := typeStringMap[v.DeclaredType]; !ok {
+ validTypes := []string{}
+ for k := range typeStringMap {
+ validTypes = append(validTypes, k)
+ }
+ return fmt.Errorf(
+ "Variable '%s' type must be one of [%s] - '%s' is not a valid type",
+ v.Name,
+ strings.Join(validTypes, ", "),
+ v.DeclaredType,
+ )
+ }
+ }
+
+ if v.DeclaredType == "" || v.Default == nil {
+ return nil
+ }
+
+ if v.inferTypeFromDefault() != v.Type() {
+ return fmt.Errorf("'%s' has a default value which is not of type '%s' (got '%s')",
+ v.Name, v.DeclaredType, v.inferTypeFromDefault().Printable())
+ }
+
+ return nil
+}
+
+func (v *Variable) mergerName() string {
+ return v.Name
+}
+
+func (v *Variable) mergerMerge(m merger) merger {
+ return v.Merge(m.(*Variable))
+}
+
+// Required tests whether a variable is required or not.
+func (v *Variable) Required() bool {
+ return v.Default == nil
+}
+
+// inferTypeFromDefault contains the logic for the old method of inferring
+// variable types - we can also use this for validating that the declared
+// type matches the type of the default value
+func (v *Variable) inferTypeFromDefault() VariableType {
+ if v.Default == nil {
+ return VariableTypeString
+ }
+
+ var s string
+ if err := hilmapstructure.WeakDecode(v.Default, &s); err == nil {
+ v.Default = s
+ return VariableTypeString
+ }
+
+ var m map[string]interface{}
+ if err := hilmapstructure.WeakDecode(v.Default, &m); err == nil {
+ v.Default = m
+ return VariableTypeMap
+ }
+
+ var l []interface{}
+ if err := hilmapstructure.WeakDecode(v.Default, &l); err == nil {
+ v.Default = l
+ return VariableTypeList
+ }
+
+ return VariableTypeUnknown
+}
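+
+// For example, a Default of "5" infers VariableTypeString, a
+// map[string]interface{} infers VariableTypeMap, and a []interface{}
+// infers VariableTypeList. Note that the WeakDecode calls above also
+// normalize v.Default to the decoded value as a side effect.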
+
+func (m ResourceMode) Taintable() bool {
+ switch m {
+ case ManagedResourceMode:
+ return true
+ case DataResourceMode:
+ return false
+ default:
+ panic(fmt.Errorf("unsupported ResourceMode value %s", m))
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/config_string.go b/vendor/github.com/hashicorp/terraform/config/config_string.go
new file mode 100644
index 00000000..0b3abbcd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config_string.go
@@ -0,0 +1,338 @@
+package config
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// TestString is a Stringer-like function that outputs a string that can
+// be used to easily compare multiple Config structures in unit tests.
+//
+// This function has no practical use outside of unit tests and debugging.
+func (c *Config) TestString() string {
+ if c == nil {
+ return "<nil config>"
+ }
+
+ var buf bytes.Buffer
+ if len(c.Modules) > 0 {
+ buf.WriteString("Modules:\n\n")
+ buf.WriteString(modulesStr(c.Modules))
+ buf.WriteString("\n\n")
+ }
+
+ if len(c.Variables) > 0 {
+ buf.WriteString("Variables:\n\n")
+ buf.WriteString(variablesStr(c.Variables))
+ buf.WriteString("\n\n")
+ }
+
+ if len(c.ProviderConfigs) > 0 {
+ buf.WriteString("Provider Configs:\n\n")
+ buf.WriteString(providerConfigsStr(c.ProviderConfigs))
+ buf.WriteString("\n\n")
+ }
+
+ if len(c.Resources) > 0 {
+ buf.WriteString("Resources:\n\n")
+ buf.WriteString(resourcesStr(c.Resources))
+ buf.WriteString("\n\n")
+ }
+
+ if len(c.Outputs) > 0 {
+ buf.WriteString("Outputs:\n\n")
+ buf.WriteString(outputsStr(c.Outputs))
+ buf.WriteString("\n")
+ }
+
+ return strings.TrimSpace(buf.String())
+}
+
+func terraformStr(t *Terraform) string {
+ result := ""
+
+ if b := t.Backend; b != nil {
+ result += fmt.Sprintf("backend (%s)\n", b.Type)
+
+ keys := make([]string, 0, len(b.RawConfig.Raw))
+ for k := range b.RawConfig.Raw {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ result += fmt.Sprintf(" %s\n", k)
+ }
+ }
+
+ return strings.TrimSpace(result)
+}
+
+func modulesStr(ms []*Module) string {
+ result := ""
+ order := make([]int, 0, len(ms))
+ ks := make([]string, 0, len(ms))
+ mapping := make(map[string]int)
+ for i, m := range ms {
+ k := m.Id()
+ ks = append(ks, k)
+ mapping[k] = i
+ }
+ sort.Strings(ks)
+ for _, k := range ks {
+ order = append(order, mapping[k])
+ }
+
+ for _, i := range order {
+ m := ms[i]
+ result += fmt.Sprintf("%s\n", m.Id())
+
+ ks := make([]string, 0, len(m.RawConfig.Raw))
+ for k := range m.RawConfig.Raw {
+ ks = append(ks, k)
+ }
+ sort.Strings(ks)
+
+ result += fmt.Sprintf(" source = %s\n", m.Source)
+
+ for _, k := range ks {
+ result += fmt.Sprintf(" %s\n", k)
+ }
+ }
+
+ return strings.TrimSpace(result)
+}
+
+func outputsStr(os []*Output) string {
+ ns := make([]string, 0, len(os))
+ m := make(map[string]*Output)
+ for _, o := range os {
+ ns = append(ns, o.Name)
+ m[o.Name] = o
+ }
+ sort.Strings(ns)
+
+ result := ""
+ for _, n := range ns {
+ o := m[n]
+
+ result += fmt.Sprintf("%s\n", n)
+
+ if len(o.DependsOn) > 0 {
+ result += fmt.Sprintf(" dependsOn\n")
+ for _, d := range o.DependsOn {
+ result += fmt.Sprintf(" %s\n", d)
+ }
+ }
+
+ if len(o.RawConfig.Variables) > 0 {
+ result += fmt.Sprintf(" vars\n")
+ for _, rawV := range o.RawConfig.Variables {
+ kind := "unknown"
+ str := rawV.FullKey()
+
+ switch rawV.(type) {
+ case *ResourceVariable:
+ kind = "resource"
+ case *UserVariable:
+ kind = "user"
+ }
+
+ result += fmt.Sprintf(" %s: %s\n", kind, str)
+ }
+ }
+ }
+
+ return strings.TrimSpace(result)
+}
+
+// This helper turns a provider configs field into a deterministic
+// string value for comparison in tests.
+func providerConfigsStr(pcs []*ProviderConfig) string {
+ result := ""
+
+ ns := make([]string, 0, len(pcs))
+ m := make(map[string]*ProviderConfig)
+ for _, n := range pcs {
+ ns = append(ns, n.Name)
+ m[n.Name] = n
+ }
+ sort.Strings(ns)
+
+ for _, n := range ns {
+ pc := m[n]
+
+ result += fmt.Sprintf("%s\n", n)
+
+ keys := make([]string, 0, len(pc.RawConfig.Raw))
+ for k := range pc.RawConfig.Raw {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ result += fmt.Sprintf(" %s\n", k)
+ }
+
+ if len(pc.RawConfig.Variables) > 0 {
+ result += fmt.Sprintf(" vars\n")
+ for _, rawV := range pc.RawConfig.Variables {
+ kind := "unknown"
+ str := rawV.FullKey()
+
+ switch rawV.(type) {
+ case *ResourceVariable:
+ kind = "resource"
+ case *UserVariable:
+ kind = "user"
+ }
+
+ result += fmt.Sprintf(" %s: %s\n", kind, str)
+ }
+ }
+ }
+
+ return strings.TrimSpace(result)
+}
+
+// This helper turns a resources field into a deterministic
+// string value for comparison in tests.
+func resourcesStr(rs []*Resource) string {
+ result := ""
+ order := make([]int, 0, len(rs))
+ ks := make([]string, 0, len(rs))
+ mapping := make(map[string]int)
+ for i, r := range rs {
+ k := r.Id()
+ ks = append(ks, k)
+ mapping[k] = i
+ }
+ sort.Strings(ks)
+ for _, k := range ks {
+ order = append(order, mapping[k])
+ }
+
+ for _, i := range order {
+ r := rs[i]
+ result += fmt.Sprintf(
+ "%s (x%s)\n",
+ r.Id(),
+ r.RawCount.Value())
+
+ ks := make([]string, 0, len(r.RawConfig.Raw))
+ for k := range r.RawConfig.Raw {
+ ks = append(ks, k)
+ }
+ sort.Strings(ks)
+
+ for _, k := range ks {
+ result += fmt.Sprintf(" %s\n", k)
+ }
+
+ if len(r.Provisioners) > 0 {
+ result += fmt.Sprintf(" provisioners\n")
+ for _, p := range r.Provisioners {
+ when := ""
+ if p.When != ProvisionerWhenCreate {
+ when = fmt.Sprintf(" (%s)", p.When.String())
+ }
+
+ result += fmt.Sprintf(" %s%s\n", p.Type, when)
+
+ if p.OnFailure != ProvisionerOnFailureFail {
+ result += fmt.Sprintf(" on_failure = %s\n", p.OnFailure.String())
+ }
+
+ ks := make([]string, 0, len(p.RawConfig.Raw))
+ for k := range p.RawConfig.Raw {
+ ks = append(ks, k)
+ }
+ sort.Strings(ks)
+
+ for _, k := range ks {
+ result += fmt.Sprintf(" %s\n", k)
+ }
+ }
+ }
+
+ if len(r.DependsOn) > 0 {
+ result += fmt.Sprintf(" dependsOn\n")
+ for _, d := range r.DependsOn {
+ result += fmt.Sprintf(" %s\n", d)
+ }
+ }
+
+ if len(r.RawConfig.Variables) > 0 {
+ result += fmt.Sprintf(" vars\n")
+
+ ks := make([]string, 0, len(r.RawConfig.Variables))
+ for k := range r.RawConfig.Variables {
+ ks = append(ks, k)
+ }
+ sort.Strings(ks)
+
+ for _, k := range ks {
+ rawV := r.RawConfig.Variables[k]
+ kind := "unknown"
+ str := rawV.FullKey()
+
+ switch rawV.(type) {
+ case *ResourceVariable:
+ kind = "resource"
+ case *UserVariable:
+ kind = "user"
+ }
+
+ result += fmt.Sprintf(" %s: %s\n", kind, str)
+ }
+ }
+ }
+
+ return strings.TrimSpace(result)
+}
+
+// This helper turns a variables field into a deterministic
+// string value for comparison in tests.
+func variablesStr(vs []*Variable) string {
+ result := ""
+ ks := make([]string, 0, len(vs))
+ m := make(map[string]*Variable)
+ for _, v := range vs {
+ ks = append(ks, v.Name)
+ m[v.Name] = v
+ }
+ sort.Strings(ks)
+
+ for _, k := range ks {
+ v := m[k]
+
+ required := ""
+ if v.Required() {
+ required = " (required)"
+ }
+
+ declaredType := ""
+ if v.DeclaredType != "" {
+ declaredType = fmt.Sprintf(" (%s)", v.DeclaredType)
+ }
+
+ // Use local copies so this test helper doesn't mutate the variable.
+ defaultVal := v.Default
+ if defaultVal == nil || defaultVal == "" {
+ defaultVal = "<>"
+ }
+ description := v.Description
+ if description == "" {
+ description = "<>"
+ }
+
+ result += fmt.Sprintf(
+ "%s%s%s\n %v\n %s\n",
+ k,
+ required,
+ declaredType,
+ defaultVal,
+ description)
+ }
+
+ return strings.TrimSpace(result)
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/config_terraform.go b/vendor/github.com/hashicorp/terraform/config/config_terraform.go
new file mode 100644
index 00000000..8535c964
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config_terraform.go
@@ -0,0 +1,117 @@
+package config
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/go-version"
+ "github.com/mitchellh/hashstructure"
+)
+
+// Terraform is the Terraform meta-configuration that can be present
+// in configuration files for configuring Terraform itself.
+type Terraform struct {
+ RequiredVersion string `hcl:"required_version"` // Required Terraform version (constraint)
+ Backend *Backend // See Backend struct docs
+}
+
+// Validate performs the validation for just the Terraform configuration.
+func (t *Terraform) Validate() []error {
+ var errs []error
+
+ if raw := t.RequiredVersion; raw != "" {
+ // Check that the value has no interpolations
+ rc, err := NewRawConfig(map[string]interface{}{
+ "root": raw,
+ })
+ if err != nil {
+ errs = append(errs, fmt.Errorf(
+ "terraform.required_version: %s", err))
+ } else if len(rc.Interpolations) > 0 {
+ errs = append(errs, fmt.Errorf(
+ "terraform.required_version: cannot contain interpolations"))
+ } else {
+ // Check it is valid
+ _, err := version.NewConstraint(raw)
+ if err != nil {
+ errs = append(errs, fmt.Errorf(
+ "terraform.required_version: invalid syntax: %s", err))
+ }
+ }
+ }
+
+ if t.Backend != nil {
+ errs = append(errs, t.Backend.Validate()...)
+ }
+
+ return errs
+}
+
+// Merge merges t2 into t.
+// Any conflicting fields are overwritten by t2.
+func (t *Terraform) Merge(t2 *Terraform) {
+ if t2.RequiredVersion != "" {
+ t.RequiredVersion = t2.RequiredVersion
+ }
+
+ if t2.Backend != nil {
+ t.Backend = t2.Backend
+ }
+}
+
+// Backend is the configuration for the "backend" to use with Terraform.
+// A backend is responsible for all major behavior of Terraform's core.
+// The abstraction layer above the core (the "backend") allows for behavior
+// such as remote operation.
+type Backend struct {
+ Type string
+ RawConfig *RawConfig
+
+ // Hash is a unique hash code representing the original configuration
+ // of the backend. This won't be recomputed unless Rehash is called.
+ Hash uint64
+}
+
+// Rehash returns a unique content hash for this backend's configuration
+// as a uint64 value.
+func (b *Backend) Rehash() uint64 {
+ // If we have no backend, the value is zero
+ if b == nil {
+ return 0
+ }
+
+ // Use hashstructure to hash only our type with the config.
+ code, err := hashstructure.Hash(map[string]interface{}{
+ "type": b.Type,
+ "config": b.RawConfig.Raw,
+ }, nil)
+
+ // This should never happen since we hash just some basic primitives,
+ // so panic if there is an error.
+ if err != nil {
+ panic(err)
+ }
+
+ return code
+}
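+
+// A minimal usage sketch: since Rehash derives the hash only from the
+// backend type and raw config, callers can compare it against the stored
+// Hash field to detect configuration drift:
+//
+//	if b.Hash != b.Rehash() {
+//		// backend configuration changed since Hash was recorded
+//	}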
+
+func (b *Backend) Validate() []error {
+ if len(b.RawConfig.Interpolations) > 0 {
+ return []error{fmt.Errorf(strings.TrimSpace(errBackendInterpolations))}
+ }
+
+ return nil
+}
+
+const errBackendInterpolations = `
+terraform.backend: configuration cannot contain interpolations
+
+The backend configuration is loaded by Terraform extremely early, before
+the core of Terraform can be initialized. This is necessary because the backend
+dictates the behavior of that core. The core is what handles interpolation
+processing. Because of this, interpolations cannot be used in backend
+configuration.
+
+If you'd like to parameterize backend configuration, we recommend using
+partial configuration with the "-backend-config" flag to "terraform init".
+`
diff --git a/vendor/github.com/hashicorp/terraform/config/config_tree.go b/vendor/github.com/hashicorp/terraform/config/config_tree.go
new file mode 100644
index 00000000..08dc0fe9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config_tree.go
@@ -0,0 +1,43 @@
+package config
+
+// configTree represents a tree of configurations where the root is the
+// first file and its children are the configurations it has imported.
+type configTree struct {
+ Path string
+ Config *Config
+ Children []*configTree
+}
+
+// Flatten flattens the entire tree down to a single merged Config
+// structure.
+func (t *configTree) Flatten() (*Config, error) {
+ // No children is easy: we're already merged!
+ if len(t.Children) == 0 {
+ return t.Config, nil
+ }
+
+ // Depth-first, merge all the children first.
+ childConfigs := make([]*Config, len(t.Children))
+ for i, ct := range t.Children {
+ c, err := ct.Flatten()
+ if err != nil {
+ return nil, err
+ }
+
+ childConfigs[i] = c
+ }
+
+ // Merge all the children in order
+ config := childConfigs[0]
+ childConfigs = childConfigs[1:]
+ for _, config2 := range childConfigs {
+ var err error
+ config, err = Merge(config, config2)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Merge the final merged child config with our own
+ return Merge(config, t.Config)
+}
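+
+// As an illustration of the merge order above: for children [c1, c2] and
+// root config r, Flatten computes Merge(Merge(c1, c2), r), so later
+// children override earlier ones and the root overrides them all.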
diff --git a/vendor/github.com/hashicorp/terraform/config/import_tree.go b/vendor/github.com/hashicorp/terraform/config/import_tree.go
new file mode 100644
index 00000000..37ec11a1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/import_tree.go
@@ -0,0 +1,113 @@
+package config
+
+import (
+ "fmt"
+ "io"
+)
+
+// configurable is an interface that must be implemented by any
+// configuration format of Terraform in order to return a *Config.
+type configurable interface {
+ Config() (*Config, error)
+}
+
+// importTree is the result of the first-pass load of the configuration
+// files. It is a tree of raw configurables and then any children (their
+// imports).
+//
+// An importTree can be turned into a configTree.
+type importTree struct {
+ Path string
+ Raw configurable
+ Children []*importTree
+}
+
+// This is the function type that must be implemented by the configuration
+// file loader to turn a single file into a configurable and any additional
+// imports.
+type fileLoaderFunc func(path string) (configurable, []string, error)
+
+// loadTree takes a single file and loads the entire importTree for that
+// file. This function detects what kind of configuration file it is and
+// executes the proper fileLoaderFunc.
+func loadTree(root string) (*importTree, error) {
+ var f fileLoaderFunc
+ switch ext(root) {
+ case ".tf", ".tf.json":
+ f = loadFileHcl
+ }
+
+ if f == nil {
+ return nil, fmt.Errorf(
+ "%s: unknown configuration format. Use '.tf' or '.tf.json' extension",
+ root)
+ }
+
+ c, imps, err := f(root)
+ if err != nil {
+ return nil, err
+ }
+
+ children := make([]*importTree, len(imps))
+ for i, imp := range imps {
+ t, err := loadTree(imp)
+ if err != nil {
+ return nil, err
+ }
+
+ children[i] = t
+ }
+
+ return &importTree{
+ Path: root,
+ Raw: c,
+ Children: children,
+ }, nil
+}
+
+// Close releases any resources we might be holding open for the importTree.
+//
+// This can safely be called even while ConfigTree results are alive. The
+// importTree is not bound to these.
+func (t *importTree) Close() error {
+ if c, ok := t.Raw.(io.Closer); ok {
+ c.Close()
+ }
+ for _, ct := range t.Children {
+ ct.Close()
+ }
+
+ return nil
+}
+
+// ConfigTree traverses the importTree and turns each node into a *Config
+// object, ultimately returning a *configTree.
+func (t *importTree) ConfigTree() (*configTree, error) {
+ config, err := t.Raw.Config()
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error loading %s: %s",
+ t.Path,
+ err)
+ }
+
+ // Build our result
+ result := &configTree{
+ Path: t.Path,
+ Config: config,
+ }
+
+ // Build the config trees for the children
+ result.Children = make([]*configTree, len(t.Children))
+ for i, ct := range t.Children {
+ t, err := ct.ConfigTree()
+ if err != nil {
+ return nil, err
+ }
+
+ result.Children[i] = t
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate.go b/vendor/github.com/hashicorp/terraform/config/interpolate.go
new file mode 100644
index 00000000..bbb35554
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate.go
@@ -0,0 +1,386 @@
+package config
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+// An InterpolatedVariable is a variable reference within an interpolation.
+//
+// Implementations of this interface represent the various sources from
+// which variables can come: user variables, resources, etc.
+type InterpolatedVariable interface {
+ FullKey() string
+}
+
+// CountVariable is a variable for referencing information about
+// the count.
+type CountVariable struct {
+ Type CountValueType
+ key string
+}
+
+// CountValueType is the type of the count variable that is referenced.
+type CountValueType byte
+
+const (
+ CountValueInvalid CountValueType = iota
+ CountValueIndex
+)
+
+// A ModuleVariable is a variable that is referencing the output
+// of a module, such as "${module.foo.bar}"
+type ModuleVariable struct {
+ Name string
+ Field string
+ key string
+}
+
+// A PathVariable is a variable that references path information about the
+// module.
+type PathVariable struct {
+ Type PathValueType
+ key string
+}
+
+type PathValueType byte
+
+const (
+ PathValueInvalid PathValueType = iota
+ PathValueCwd
+ PathValueModule
+ PathValueRoot
+)
+
+// A ResourceVariable is a variable that is referencing the field
+// of a resource, such as "${aws_instance.foo.ami}"
+type ResourceVariable struct {
+ Mode ResourceMode
+ Type string // Resource type, i.e. "aws_instance"
+ Name string // Resource name
+ Field string // Resource field
+
+ Multi bool // True if multi-variable: aws_instance.foo.*.id
+ Index int // Index for multi-variable: aws_instance.foo.1.id == 1
+
+ key string
+}
+
+// SelfVariable is a variable that is referencing the same resource
+// it is running on: "${self.address}"
+type SelfVariable struct {
+ Field string
+
+ key string
+}
+
+// SimpleVariable is an unprefixed variable, which can show up when users have
+// strings they are passing down to resources that use interpolation
+// internally. The template_file resource is an example of this.
+type SimpleVariable struct {
+ Key string
+}
+
+// TerraformVariable is a "terraform."-prefixed variable used to access
+// metadata about the Terraform run.
+type TerraformVariable struct {
+ Field string
+ key string
+}
+
+// A UserVariable is a variable that is referencing a user variable
+// that is input from outside the configuration. This looks like
+// "${var.foo}"
+type UserVariable struct {
+ Name string
+ Elem string
+
+ key string
+}
+
+func NewInterpolatedVariable(v string) (InterpolatedVariable, error) {
+ if strings.HasPrefix(v, "count.") {
+ return NewCountVariable(v)
+ } else if strings.HasPrefix(v, "path.") {
+ return NewPathVariable(v)
+ } else if strings.HasPrefix(v, "self.") {
+ return NewSelfVariable(v)
+ } else if strings.HasPrefix(v, "terraform.") {
+ return NewTerraformVariable(v)
+ } else if strings.HasPrefix(v, "var.") {
+ return NewUserVariable(v)
+ } else if strings.HasPrefix(v, "module.") {
+ return NewModuleVariable(v)
+ } else if !strings.ContainsRune(v, '.') {
+ return NewSimpleVariable(v)
+ } else {
+ return NewResourceVariable(v)
+ }
+}
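+
+// For example:
+//
+//	NewInterpolatedVariable("count.index")         // *CountVariable
+//	NewInterpolatedVariable("var.region")          // *UserVariable
+//	NewInterpolatedVariable("module.vpc.id")       // *ModuleVariable
+//	NewInterpolatedVariable("aws_instance.web.id") // *ResourceVariable
+//	NewInterpolatedVariable("bare")                // *SimpleVariable (no dot)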
+
+func NewCountVariable(key string) (*CountVariable, error) {
+ var fieldType CountValueType
+ parts := strings.SplitN(key, ".", 2)
+ switch parts[1] {
+ case "index":
+ fieldType = CountValueIndex
+ }
+
+ return &CountVariable{
+ Type: fieldType,
+ key: key,
+ }, nil
+}
+
+func (c *CountVariable) FullKey() string {
+ return c.key
+}
+
+func NewModuleVariable(key string) (*ModuleVariable, error) {
+ parts := strings.SplitN(key, ".", 3)
+ if len(parts) < 3 {
+ return nil, fmt.Errorf(
+ "%s: module variables must be three parts: module.name.attr",
+ key)
+ }
+
+ return &ModuleVariable{
+ Name: parts[1],
+ Field: parts[2],
+ key: key,
+ }, nil
+}
+
+func (v *ModuleVariable) FullKey() string {
+ return v.key
+}
+
+func (v *ModuleVariable) GoString() string {
+ return fmt.Sprintf("*%#v", *v)
+}
+
+func NewPathVariable(key string) (*PathVariable, error) {
+ var fieldType PathValueType
+ parts := strings.SplitN(key, ".", 2)
+ switch parts[1] {
+ case "cwd":
+ fieldType = PathValueCwd
+ case "module":
+ fieldType = PathValueModule
+ case "root":
+ fieldType = PathValueRoot
+ }
+
+ return &PathVariable{
+ Type: fieldType,
+ key: key,
+ }, nil
+}
+
+func (v *PathVariable) FullKey() string {
+ return v.key
+}
+
+func NewResourceVariable(key string) (*ResourceVariable, error) {
+ var mode ResourceMode
+ var parts []string
+ if strings.HasPrefix(key, "data.") {
+ mode = DataResourceMode
+ parts = strings.SplitN(key, ".", 4)
+ if len(parts) < 4 {
+ return nil, fmt.Errorf(
+ "%s: data variables must be four parts: data.TYPE.NAME.ATTR",
+ key)
+ }
+
+ // Don't actually need the "data." prefix for parsing, since it's
+ // always constant.
+ parts = parts[1:]
+ } else {
+ mode = ManagedResourceMode
+ parts = strings.SplitN(key, ".", 3)
+ if len(parts) < 3 {
+ return nil, fmt.Errorf(
+ "%s: resource variables must be three parts: TYPE.NAME.ATTR",
+ key)
+ }
+ }
+
+ field := parts[2]
+ multi := false
+ var index int
+
+ if idx := strings.Index(field, "."); idx != -1 {
+ indexStr := field[:idx]
+ multi = indexStr == "*"
+ index = -1
+
+ if !multi {
+ indexInt, err := strconv.ParseInt(indexStr, 0, 0)
+ if err == nil {
+ multi = true
+ index = int(indexInt)
+ }
+ }
+
+ if multi {
+ field = field[idx+1:]
+ }
+ }
+
+ return &ResourceVariable{
+ Mode: mode,
+ Type: parts[0],
+ Name: parts[1],
+ Field: field,
+ Multi: multi,
+ Index: index,
+ key: key,
+ }, nil
+}
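+
+// For example, "aws_instance.web.0.id" parses with Multi true, Index 0,
+// and Field "id", while the splat form "aws_instance.web.*.id" parses
+// with Multi true and Index -1.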
+
+func (v *ResourceVariable) ResourceId() string {
+ switch v.Mode {
+ case ManagedResourceMode:
+ return fmt.Sprintf("%s.%s", v.Type, v.Name)
+ case DataResourceMode:
+ return fmt.Sprintf("data.%s.%s", v.Type, v.Name)
+ default:
+ panic(fmt.Errorf("unknown resource mode %s", v.Mode))
+ }
+}
+
+func (v *ResourceVariable) FullKey() string {
+ return v.key
+}
+
+func NewSelfVariable(key string) (*SelfVariable, error) {
+ field := key[len("self."):]
+
+ return &SelfVariable{
+ Field: field,
+
+ key: key,
+ }, nil
+}
+
+func (v *SelfVariable) FullKey() string {
+ return v.key
+}
+
+func (v *SelfVariable) GoString() string {
+ return fmt.Sprintf("*%#v", *v)
+}
+
+func NewSimpleVariable(key string) (*SimpleVariable, error) {
+ return &SimpleVariable{key}, nil
+}
+
+func (v *SimpleVariable) FullKey() string {
+ return v.Key
+}
+
+func (v *SimpleVariable) GoString() string {
+ return fmt.Sprintf("*%#v", *v)
+}
+
+func NewTerraformVariable(key string) (*TerraformVariable, error) {
+ field := key[len("terraform."):]
+ return &TerraformVariable{
+ Field: field,
+ key: key,
+ }, nil
+}
+
+func (v *TerraformVariable) FullKey() string {
+ return v.key
+}
+
+func (v *TerraformVariable) GoString() string {
+ return fmt.Sprintf("*%#v", *v)
+}
+
+func NewUserVariable(key string) (*UserVariable, error) {
+ name := key[len("var."):]
+ elem := ""
+ if idx := strings.Index(name, "."); idx > -1 {
+ elem = name[idx+1:]
+ name = name[:idx]
+ }
+
+ if len(elem) > 0 {
+ return nil, fmt.Errorf("Invalid dot index found: 'var.%s.%s'. Values in maps and lists can be referenced using square bracket indexing, like: 'var.mymap[\"key\"]' or 'var.mylist[1]'.", name, elem)
+ }
+
+ return &UserVariable{
+ key: key,
+
+ Name: name,
+ Elem: elem,
+ }, nil
+}
+
+func (v *UserVariable) FullKey() string {
+ return v.key
+}
+
+func (v *UserVariable) GoString() string {
+ return fmt.Sprintf("*%#v", *v)
+}
+
+// DetectVariables takes an AST root and returns all the interpolated
+// variables that are detected in the AST tree.
+func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) {
+ var result []InterpolatedVariable
+ var resultErr error
+
+ // Visitor callback
+ fn := func(n ast.Node) ast.Node {
+ if resultErr != nil {
+ return n
+ }
+
+ switch vn := n.(type) {
+ case *ast.VariableAccess:
+ v, err := NewInterpolatedVariable(vn.Name)
+ if err != nil {
+ resultErr = err
+ return n
+ }
+ result = append(result, v)
+ case *ast.Index:
+ if va, ok := vn.Target.(*ast.VariableAccess); ok {
+ v, err := NewInterpolatedVariable(va.Name)
+ if err != nil {
+ resultErr = err
+ return n
+ }
+ result = append(result, v)
+ }
+ if va, ok := vn.Key.(*ast.VariableAccess); ok {
+ v, err := NewInterpolatedVariable(va.Name)
+ if err != nil {
+ resultErr = err
+ return n
+ }
+ result = append(result, v)
+ }
+ default:
+ return n
+ }
+
+ return n
+ }
+
+ // Visitor pattern
+ root.Accept(fn)
+
+ if resultErr != nil {
+ return nil, resultErr
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
new file mode 100644
index 00000000..b7933471
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
@@ -0,0 +1,1346 @@
+package config
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "math"
+ "net"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/apparentlymart/go-cidr/cidr"
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/hil"
+ "github.com/hashicorp/hil/ast"
+ "github.com/mitchellh/go-homedir"
+)
+
+// stringSliceToVariableValue converts a string slice into the value
+// required to be returned from interpolation functions which return
+// TypeList.
+func stringSliceToVariableValue(values []string) []ast.Variable {
+ output := make([]ast.Variable, len(values))
+ for index, value := range values {
+ output[index] = ast.Variable{
+ Type: ast.TypeString,
+ Value: value,
+ }
+ }
+ return output
+}
+
+func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) {
+ output := make([]string, len(values))
+ for index, value := range values {
+ if value.Type != ast.TypeString {
+ return []string{}, fmt.Errorf("list has non-string element (%T)", value.Type.String())
+ }
+ output[index] = value.Value.(string)
+ }
+ return output, nil
+}
+
+// Funcs is the mapping of built-in functions for configuration.
+func Funcs() map[string]ast.Function {
+ return map[string]ast.Function{
+ "basename": interpolationFuncBasename(),
+ "base64decode": interpolationFuncBase64Decode(),
+ "base64encode": interpolationFuncBase64Encode(),
+ "base64sha256": interpolationFuncBase64Sha256(),
+ "ceil": interpolationFuncCeil(),
+ "chomp": interpolationFuncChomp(),
+ "cidrhost": interpolationFuncCidrHost(),
+ "cidrnetmask": interpolationFuncCidrNetmask(),
+ "cidrsubnet": interpolationFuncCidrSubnet(),
+ "coalesce": interpolationFuncCoalesce(),
+ "coalescelist": interpolationFuncCoalesceList(),
+ "compact": interpolationFuncCompact(),
+ "concat": interpolationFuncConcat(),
+ "dirname": interpolationFuncDirname(),
+ "distinct": interpolationFuncDistinct(),
+ "element": interpolationFuncElement(),
+ "file": interpolationFuncFile(),
+ "matchkeys": interpolationFuncMatchKeys(),
+ "floor": interpolationFuncFloor(),
+ "format": interpolationFuncFormat(),
+ "formatlist": interpolationFuncFormatList(),
+ "index": interpolationFuncIndex(),
+ "join": interpolationFuncJoin(),
+ "jsonencode": interpolationFuncJSONEncode(),
+ "length": interpolationFuncLength(),
+ "list": interpolationFuncList(),
+ "lower": interpolationFuncLower(),
+ "map": interpolationFuncMap(),
+ "max": interpolationFuncMax(),
+ "md5": interpolationFuncMd5(),
+ "merge": interpolationFuncMerge(),
+ "min": interpolationFuncMin(),
+ "pathexpand": interpolationFuncPathExpand(),
+ "uuid": interpolationFuncUUID(),
+ "replace": interpolationFuncReplace(),
+ "sha1": interpolationFuncSha1(),
+ "sha256": interpolationFuncSha256(),
+ "signum": interpolationFuncSignum(),
+ "slice": interpolationFuncSlice(),
+ "sort": interpolationFuncSort(),
+ "split": interpolationFuncSplit(),
+ "substr": interpolationFuncSubstr(),
+ "timestamp": interpolationFuncTimestamp(),
+ "title": interpolationFuncTitle(),
+ "trimspace": interpolationFuncTrimSpace(),
+ "upper": interpolationFuncUpper(),
+ "zipmap": interpolationFuncZipMap(),
+ }
+}
+
+// interpolationFuncList creates a list from the parameters passed
+// to it.
+func interpolationFuncList() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{},
+ ReturnType: ast.TypeList,
+ Variadic: true,
+ VariadicType: ast.TypeAny,
+ Callback: func(args []interface{}) (interface{}, error) {
+ var outputList []ast.Variable
+
+ for i, val := range args {
+ switch v := val.(type) {
+ case string:
+ outputList = append(outputList, ast.Variable{Type: ast.TypeString, Value: v})
+ case []ast.Variable:
+ outputList = append(outputList, ast.Variable{Type: ast.TypeList, Value: v})
+ case map[string]ast.Variable:
+ outputList = append(outputList, ast.Variable{Type: ast.TypeMap, Value: v})
+ default:
+ return nil, fmt.Errorf("unexpected type %T for argument %d in list", v, i)
+ }
+ }
+
+ // we don't support heterogeneous types, so make sure all types match the first
+ if len(outputList) > 0 {
+ firstType := outputList[0].Type
+ for i, v := range outputList[1:] {
+ if v.Type != firstType {
+ return nil, fmt.Errorf("unexpected type %s for argument %d in list", v.Type, i+1)
+ }
+ }
+ }
+
+ return outputList, nil
+ },
+ }
+}
+
+// interpolationFuncMap creates a map from the parameters passed
+// to it.
+func interpolationFuncMap() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{},
+ ReturnType: ast.TypeMap,
+ Variadic: true,
+ VariadicType: ast.TypeAny,
+ Callback: func(args []interface{}) (interface{}, error) {
+ outputMap := make(map[string]ast.Variable)
+
+ if len(args)%2 != 0 {
+ return nil, fmt.Errorf("requires an even number of arguments, got %d", len(args))
+ }
+
+ var firstType *ast.Type
+ for i := 0; i < len(args); i += 2 {
+ key, ok := args[i].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument %d represents a key, so it must be a string", i+1)
+ }
+ val := args[i+1]
+ variable, err := hil.InterfaceToVariable(val)
+ if err != nil {
+ return nil, err
+ }
+ // Enforce map type homogeneity
+ if firstType == nil {
+ firstType = &variable.Type
+ } else if variable.Type != *firstType {
+ return nil, fmt.Errorf("all map values must have the same type, got %s then %s", firstType.Printable(), variable.Type.Printable())
+ }
+ // Check for duplicate keys
+ if _, ok := outputMap[key]; ok {
+ return nil, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key)
+ }
+ outputMap[key] = variable
+ }
+
+ return outputMap, nil
+ },
+ }
+}
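+
+// For example, map("us-east-1", "ami-123", "eu-west-1", "ami-456")
+// produces a two-entry string map; an odd argument count, mixed value
+// types, or a duplicate key is rejected with an error.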
+
+// interpolationFuncCompact strips a list of multi-variable values
+// (e.g. as returned by "split") of any empty strings.
+func interpolationFuncCompact() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList},
+ ReturnType: ast.TypeList,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ inputList := args[0].([]ast.Variable)
+
+ var outputList []string
+ for _, val := range inputList {
+ strVal, ok := val.Value.(string)
+ if !ok {
+ return nil, fmt.Errorf(
+ "compact() may only be used with flat lists, this list contains elements of %s",
+ val.Type.Printable())
+ }
+ if strVal == "" {
+ continue
+ }
+
+ outputList = append(outputList, strVal)
+ }
+ return stringSliceToVariableValue(outputList), nil
+ },
+ }
+}
+
+// interpolationFuncCidrHost implements the "cidrhost" function that
+// fills in the host part of a CIDR range address to create a single
+// host address
+func interpolationFuncCidrHost() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{
+ ast.TypeString, // starting CIDR mask
+ ast.TypeInt, // host number to insert
+ },
+ ReturnType: ast.TypeString,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ hostNum := args[1].(int)
+ _, network, err := net.ParseCIDR(args[0].(string))
+ if err != nil {
+ return nil, fmt.Errorf("invalid CIDR expression: %s", err)
+ }
+
+ ip, err := cidr.Host(network, hostNum)
+ if err != nil {
+ return nil, err
+ }
+
+ return ip.String(), nil
+ },
+ }
+}
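+
+// For example, cidrhost("10.0.0.0/8", 5) returns "10.0.0.5".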
+
+// interpolationFuncCidrNetmask implements the "cidrnetmask" function
+// that returns the subnet mask in IP address notation.
+func interpolationFuncCidrNetmask() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{
+ ast.TypeString, // CIDR mask
+ },
+ ReturnType: ast.TypeString,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ _, network, err := net.ParseCIDR(args[0].(string))
+ if err != nil {
+ return nil, fmt.Errorf("invalid CIDR expression: %s", err)
+ }
+
+ return net.IP(network.Mask).String(), nil
+ },
+ }
+}
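+
+// For example (illustrative values):
+//
+//   ${cidrnetmask("172.16.0.0/12")} => 255.240.0.0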
+
+// interpolationFuncCidrSubnet implements the "cidrsubnet" function that
+// adds an additional subnet of the given length onto an existing
+// IP block expressed in CIDR notation.
+func interpolationFuncCidrSubnet() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{
+ ast.TypeString, // starting CIDR mask
+ ast.TypeInt, // number of bits to extend the prefix
+ ast.TypeInt, // network number to append to the prefix
+ },
+ ReturnType: ast.TypeString,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ extraBits := args[1].(int)
+ subnetNum := args[2].(int)
+ _, network, err := net.ParseCIDR(args[0].(string))
+ if err != nil {
+ return nil, fmt.Errorf("invalid CIDR expression: %s", err)
+ }
+
+ // For portability with 32-bit systems where the subnet number
+ // will be a 32-bit int, we only allow extension of 32 bits in
+ // one call even if we're running on a 64-bit machine.
+ // (Of course, this is significant only for IPv6.)
+ if extraBits > 32 {
+ return nil, fmt.Errorf("may not extend prefix by more than 32 bits")
+ }
+
+ newNetwork, err := cidr.Subnet(network, extraBits, subnetNum)
+ if err != nil {
+ return nil, err
+ }
+
+ return newNetwork.String(), nil
+ },
+ }
+}
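+
+// For example (illustrative values):
+//
+//   ${cidrsubnet("10.0.0.0/8", 8, 2)} => 10.2.0.0/16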
+
+// interpolationFuncCoalesce implements the "coalesce" function that
+// returns the first non-empty string from the provided input
+func interpolationFuncCoalesce() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Variadic: true,
+ VariadicType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ if len(args) < 2 {
+ return nil, fmt.Errorf("must provide at least two arguments")
+ }
+ for _, arg := range args {
+ argument := arg.(string)
+
+ if argument != "" {
+ return argument, nil
+ }
+ }
+ return "", nil
+ },
+ }
+}
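+
+// For example (illustrative values):
+//
+//   ${coalesce("", "", "b")} => "b"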
+
+// interpolationFuncCoalesceList implements the "coalescelist" function that
+// returns the first non-empty list from the provided input
+func interpolationFuncCoalesceList() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList},
+ ReturnType: ast.TypeList,
+ Variadic: true,
+ VariadicType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ if len(args) < 2 {
+ return nil, fmt.Errorf("must provide at least two arguments")
+ }
+ for _, arg := range args {
+ argument := arg.([]ast.Variable)
+
+ if len(argument) > 0 {
+ return argument, nil
+ }
+ }
+ return make([]ast.Variable, 0), nil
+ },
+ }
+}
+
+// interpolationFuncConcat implements the "concat" function that concatenates
+// multiple lists.
+func interpolationFuncConcat() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList},
+ ReturnType: ast.TypeList,
+ Variadic: true,
+ VariadicType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ var outputList []ast.Variable
+
+ for _, arg := range args {
+ for _, v := range arg.([]ast.Variable) {
+ switch v.Type {
+ case ast.TypeString:
+ outputList = append(outputList, v)
+ case ast.TypeList:
+ outputList = append(outputList, v)
+ case ast.TypeMap:
+ outputList = append(outputList, v)
+ default:
+ return nil, fmt.Errorf("concat() does not support lists of %s", v.Type.Printable())
+ }
+ }
+ }
+
+ // we don't support heterogeneous types, so make sure all types match the first
+ if len(outputList) > 0 {
+ firstType := outputList[0].Type
+ for _, v := range outputList[1:] {
+ if v.Type != firstType {
+ return nil, fmt.Errorf("unexpected %s in list of %s", v.Type.Printable(), firstType.Printable())
+ }
+ }
+ }
+
+ return outputList, nil
+ },
+ }
+}
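+
+// For example (illustrative values):
+//
+//   ${concat(list("a"), list("b", "c"))} => ["a", "b", "c"]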
+
+// interpolationFuncFile implements the "file" function that allows
+// loading contents from a file.
+func interpolationFuncFile() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ path, err := homedir.Expand(args[0].(string))
+ if err != nil {
+ return "", err
+ }
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return "", err
+ }
+
+ return string(data), nil
+ },
+ }
+}
+
+// interpolationFuncFormat implements the "format" function that does
+// string formatting.
+func interpolationFuncFormat() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ Variadic: true,
+ VariadicType: ast.TypeAny,
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ format := args[0].(string)
+ return fmt.Sprintf(format, args[1:]...), nil
+ },
+ }
+}
+
+// interpolationFuncMax returns the maximum of the numeric arguments
+func interpolationFuncMax() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeFloat},
+ ReturnType: ast.TypeFloat,
+ Variadic: true,
+ VariadicType: ast.TypeFloat,
+ Callback: func(args []interface{}) (interface{}, error) {
+ max := args[0].(float64)
+
+ for i := 1; i < len(args); i++ {
+ max = math.Max(max, args[i].(float64))
+ }
+
+ return max, nil
+ },
+ }
+}
+
+// interpolationFuncMin returns the minimum of the numeric arguments
+func interpolationFuncMin() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeFloat},
+ ReturnType: ast.TypeFloat,
+ Variadic: true,
+ VariadicType: ast.TypeFloat,
+ Callback: func(args []interface{}) (interface{}, error) {
+ min := args[0].(float64)
+
+ for i := 1; i < len(args); i++ {
+ min = math.Min(min, args[i].(float64))
+ }
+
+ return min, nil
+ },
+ }
+}
+
+// interpolationFuncPathExpand expands a leading `~` in the given path to
+// the current user's home directory
+func interpolationFuncPathExpand() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return homedir.Expand(args[0].(string))
+ },
+ }
+}
+
+// interpolationFuncCeil returns the least integer value greater than or equal to the argument
+func interpolationFuncCeil() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeFloat},
+ ReturnType: ast.TypeInt,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return int(math.Ceil(args[0].(float64))), nil
+ },
+ }
+}
+
+// interpolationFuncChomp removes trailing newlines from the given string
+func interpolationFuncChomp() ast.Function {
+ newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return newlines.ReplaceAllString(args[0].(string), ""), nil
+ },
+ }
+}
+
+// interpolationFuncFloor returns the greatest integer value less than or equal to the argument
+func interpolationFuncFloor() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeFloat},
+ ReturnType: ast.TypeInt,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return int(math.Floor(args[0].(float64))), nil
+ },
+ }
+}
+
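+// interpolationFuncZipMap implements the "zipmap" function that constructs
+// a map from a list of keys and a corresponding list of values.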
+func interpolationFuncZipMap() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{
+ ast.TypeList, // Keys
+ ast.TypeList, // Values
+ },
+ ReturnType: ast.TypeMap,
+ Callback: func(args []interface{}) (interface{}, error) {
+ keys := args[0].([]ast.Variable)
+ values := args[1].([]ast.Variable)
+
+ if len(keys) != len(values) {
+ return nil, fmt.Errorf("count of keys (%d) does not match count of values (%d)",
+ len(keys), len(values))
+ }
+
+ for i, val := range keys {
+ if val.Type != ast.TypeString {
+ return nil, fmt.Errorf("keys must be strings. value at position %d is %s",
+ i, val.Type.Printable())
+ }
+ }
+
+ result := map[string]ast.Variable{}
+ for i := 0; i < len(keys); i++ {
+ result[keys[i].Value.(string)] = values[i]
+ }
+
+ return result, nil
+ },
+ }
+}
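+
+// For example (illustrative values):
+//
+//   ${zipmap(list("a", "b"), list("1", "2"))} => {a = "1", b = "2"}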
+
+// interpolationFuncFormatList implements the "formatlist" function that does
+// string formatting on lists.
+func interpolationFuncFormatList() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeAny},
+ Variadic: true,
+ VariadicType: ast.TypeAny,
+ ReturnType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ // Make a copy of the variadic part of args
+ // to avoid modifying the original.
+ varargs := make([]interface{}, len(args)-1)
+ copy(varargs, args[1:])
+
+ // Verify we have some arguments
+ if len(varargs) == 0 {
+ return nil, fmt.Errorf("no arguments to formatlist")
+ }
+
+ // Convert arguments that are lists into slices.
+ // Confirm along the way that all lists have the same length (n).
+ var n int
+ listSeen := false
+ for i := 1; i < len(args); i++ {
+ s, ok := args[i].([]ast.Variable)
+ if !ok {
+ continue
+ }
+
+ // Mark that we've seen at least one list
+ listSeen = true
+
+ // Convert the ast.Variable to a slice of strings
+ parts, err := listVariableValueToStringSlice(s)
+ if err != nil {
+ return nil, err
+ }
+
+ // store the converted list; it is indexed per element below
+ varargs[i-1] = parts
+
+ // Check length
+ if n == 0 {
+ // first list we've seen
+ n = len(parts)
+ continue
+ }
+ if n != len(parts) {
+ return nil, fmt.Errorf("format: mismatched list lengths: %d != %d", n, len(parts))
+ }
+ }
+
+ // If we didn't see a list this is an error because we
+ // can't determine the return value length.
+ if !listSeen {
+ return nil, fmt.Errorf(
+ "formatlist requires at least one list argument")
+ }
+
+ // Do the formatting.
+ format := args[0].(string)
+
+ // Generate a list of formatted strings.
+ list := make([]string, n)
+ fmtargs := make([]interface{}, len(varargs))
+ for i := 0; i < n; i++ {
+ for j, arg := range varargs {
+ switch arg := arg.(type) {
+ default:
+ fmtargs[j] = arg
+ case []string:
+ fmtargs[j] = arg[i]
+ }
+ }
+ list[i] = fmt.Sprintf(format, fmtargs...)
+ }
+ return stringSliceToVariableValue(list), nil
+ },
+ }
+}
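+
+// For example (illustrative values):
+//
+//   ${formatlist("%s=%s", list("a", "b"), list("1", "2"))} => ["a=1", "b=2"]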
+
+// interpolationFuncIndex implements the "index" function that allows one to
+// find the index of a specific element in a list
+func interpolationFuncIndex() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList, ast.TypeString},
+ ReturnType: ast.TypeInt,
+ Callback: func(args []interface{}) (interface{}, error) {
+ haystack := args[0].([]ast.Variable)
+ needle := args[1].(string)
+ for index, element := range haystack {
+ if needle == element.Value {
+ return index, nil
+ }
+ }
+ return nil, fmt.Errorf("Could not find '%s' in '%s'", needle, haystack)
+ },
+ }
+}
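+
+// For example (illustrative values):
+//
+//   ${index(list("a", "b", "c"), "b")} => 1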
+
+// interpolationFuncBasename implements the "dirname" function.
+func interpolationFuncDirname() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return filepath.Dir(args[0].(string)), nil
+ },
+ }
+}
+
+// interpolationFuncDistinct implements the "distinct" function that
+// removes duplicate elements from a list.
+func interpolationFuncDistinct() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList},
+ ReturnType: ast.TypeList,
+ Variadic: true,
+ VariadicType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ var list []string
+
+ if len(args) != 1 {
+ return nil, fmt.Errorf("accepts only one argument.")
+ }
+
+ if argument, ok := args[0].([]ast.Variable); ok {
+ for _, element := range argument {
+ if element.Type != ast.TypeString {
+ return nil, fmt.Errorf(
+ "only works for flat lists, this list contains elements of %s",
+ element.Type.Printable())
+ }
+ list = appendIfMissing(list, element.Value.(string))
+ }
+ }
+
+ return stringSliceToVariableValue(list), nil
+ },
+ }
+}
+
+// helper function to add an element to a list, if it does not already exist
+func appendIfMissing(slice []string, element string) []string {
+ for _, ele := range slice {
+ if ele == element {
+ return slice
+ }
+ }
+ return append(slice, element)
+}
+
+// for two lists `keys` and `values` of equal length, returns all elements
+// from `values` where the corresponding element from `keys` is in `searchset`.
+func interpolationFuncMatchKeys() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList, ast.TypeList, ast.TypeList},
+ ReturnType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ output := make([]ast.Variable, 0)
+
+ values, _ := args[0].([]ast.Variable)
+ keys, _ := args[1].([]ast.Variable)
+ searchset, _ := args[2].([]ast.Variable)
+
+ if len(keys) != len(values) {
+ return nil, fmt.Errorf("length of keys and values should be equal")
+ }
+
+ for i, key := range keys {
+ for _, search := range searchset {
+ if res, err := compareSimpleVariables(key, search); err != nil {
+ return nil, err
+ } else if res {
+ output = append(output, values[i])
+ break
+ }
+ }
+ }
+ // if searchset is empty, then output is an empty list as well.
+ // if we haven't matched any key, then output is an empty list.
+ return output, nil
+ },
+ }
+}
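+
+// For example (illustrative values):
+//
+//   ${matchkeys(list("x", "y", "z"), list("a", "b", "c"), list("b"))} => ["y"]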
+
+// compareSimpleVariables compares two variables of the same simple type;
+// complex types such as TypeList or TypeMap are not supported
+func compareSimpleVariables(a, b ast.Variable) (bool, error) {
+ if a.Type != b.Type {
+ return false, fmt.Errorf(
+ "won't compare items of different types %s and %s",
+ a.Type.Printable(), b.Type.Printable())
+ }
+ switch a.Type {
+ case ast.TypeString:
+ return a.Value.(string) == b.Value.(string), nil
+ default:
+ return false, fmt.Errorf(
+ "can't compare items of type %s",
+ a.Type.Printable())
+ }
+}
+
+// interpolationFuncJoin implements the "join" function that allows
+// multi-variable values to be joined by some character.
+func interpolationFuncJoin() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ Variadic: true,
+ VariadicType: ast.TypeList,
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ var list []string
+
+ if len(args) < 2 {
+ return nil, fmt.Errorf("not enough arguments to join()")
+ }
+
+ for _, arg := range args[1:] {
+ for _, part := range arg.([]ast.Variable) {
+ if part.Type != ast.TypeString {
+ return nil, fmt.Errorf(
+ "only works on flat lists, this list contains elements of %s",
+ part.Type.Printable())
+ }
+ list = append(list, part.Value.(string))
+ }
+ }
+
+ return strings.Join(list, args[0].(string)), nil
+ },
+ }
+}
+
+// interpolationFuncJSONEncode implements the "jsonencode" function that encodes
+// a string, list, or map as its JSON representation. For now, values in the
+// list or map may only be strings.
+func interpolationFuncJSONEncode() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeAny},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ var toEncode interface{}
+
+ switch typedArg := args[0].(type) {
+ case string:
+ toEncode = typedArg
+
+ case []ast.Variable:
+ // We preallocate the list here. Note that it's important that in
+ // the length 0 case, we have an empty list rather than nil, as
+ // they encode differently.
+ // XXX It would be nice to support arbitrarily nested data here. Is
+ // there an inverse of hil.InterfaceToVariable?
+ strings := make([]string, len(typedArg))
+
+ for i, v := range typedArg {
+ if v.Type != ast.TypeString {
+ return "", fmt.Errorf("list elements must be strings")
+ }
+ strings[i] = v.Value.(string)
+ }
+ toEncode = strings
+
+ case map[string]ast.Variable:
+ // XXX It would be nice to support arbitrarily nested data here. Is
+ // there an inverse of hil.InterfaceToVariable?
+ stringMap := make(map[string]string)
+ for k, v := range typedArg {
+ if v.Type != ast.TypeString {
+ return "", fmt.Errorf("map values must be strings")
+ }
+ stringMap[k] = v.Value.(string)
+ }
+ toEncode = stringMap
+
+ default:
+ return "", fmt.Errorf("unknown type for JSON encoding: %T", args[0])
+ }
+
+ jEnc, err := json.Marshal(toEncode)
+ if err != nil {
+ return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode)
+ }
+ return string(jEnc), nil
+ },
+ }
+}
+
+// interpolationFuncReplace implements the "replace" function that does
+// string replacement.
+func interpolationFuncReplace() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString, ast.TypeString, ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ search := args[1].(string)
+ replace := args[2].(string)
+
+ // We search/replace using a regexp if the string is surrounded
+ // in forward slashes.
+ if len(search) > 1 && search[0] == '/' && search[len(search)-1] == '/' {
+ re, err := regexp.Compile(search[1 : len(search)-1])
+ if err != nil {
+ return nil, err
+ }
+
+ return re.ReplaceAllString(s, replace), nil
+ }
+
+ return strings.Replace(s, search, replace, -1), nil
+ },
+ }
+}
+
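+// interpolationFuncLength implements the "length" function that returns the
+// length of a string or the number of elements in a list or map.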
+func interpolationFuncLength() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeAny},
+ ReturnType: ast.TypeInt,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ subject := args[0]
+
+ switch typedSubject := subject.(type) {
+ case string:
+ return len(typedSubject), nil
+ case []ast.Variable:
+ return len(typedSubject), nil
+ case map[string]ast.Variable:
+ return len(typedSubject), nil
+ }
+
+ return 0, fmt.Errorf("arguments to length() must be a string, list, or map")
+ },
+ }
+}
+
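+// interpolationFuncSignum implements the "signum" function that returns -1,
+// 0, or +1 according to the sign of the given integer.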
+func interpolationFuncSignum() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt},
+ ReturnType: ast.TypeInt,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ num := args[0].(int)
+ switch {
+ case num < 0:
+ return -1, nil
+ case num > 0:
+ return +1, nil
+ default:
+ return 0, nil
+ }
+ },
+ }
+}
+
+// interpolationFuncSlice returns a portion of the input list between from (inclusive) and to (exclusive).
+func interpolationFuncSlice() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{
+ ast.TypeList, // inputList
+ ast.TypeInt, // from
+ ast.TypeInt, // to
+ },
+ ReturnType: ast.TypeList,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ inputList := args[0].([]ast.Variable)
+ from := args[1].(int)
+ to := args[2].(int)
+
+ if from < 0 {
+ return nil, fmt.Errorf("from index must be >= 0")
+ }
+ if to > len(inputList) {
+ return nil, fmt.Errorf("to index must be <= length of the input list")
+ }
+ if from > to {
+ return nil, fmt.Errorf("from index must be <= to index")
+ }
+
+ var outputList []ast.Variable
+ for i, val := range inputList {
+ if i >= from && i < to {
+ outputList = append(outputList, val)
+ }
+ }
+ return outputList, nil
+ },
+ }
+}
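+
+// For example (illustrative values):
+//
+//   ${slice(list("a", "b", "c", "d"), 1, 3)} => ["b", "c"]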
+
+// interpolationFuncSort sorts a list of strings lexicographically
+func interpolationFuncSort() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList},
+ ReturnType: ast.TypeList,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ inputList := args[0].([]ast.Variable)
+
+ // Ensure that all the list members are strings and
+ // create a string slice from them
+ members := make([]string, len(inputList))
+ for i, val := range inputList {
+ if val.Type != ast.TypeString {
+ return nil, fmt.Errorf(
+ "sort() may only be used with lists of strings - %s at index %d",
+ val.Type.String(), i)
+ }
+
+ members[i] = val.Value.(string)
+ }
+
+ sort.Strings(members)
+ return stringSliceToVariableValue(members), nil
+ },
+ }
+}
+
+// interpolationFuncSplit implements the "split" function that allows
+// strings to be split into multi-variable values
+func interpolationFuncSplit() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString, ast.TypeString},
+ ReturnType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ sep := args[0].(string)
+ s := args[1].(string)
+ elements := strings.Split(s, sep)
+ return stringSliceToVariableValue(elements), nil
+ },
+ }
+}
+
+// interpolationFuncLookup implements the "lookup" function that allows
+// dynamic lookups of map types within a Terraform configuration.
+func interpolationFuncLookup(vs map[string]ast.Variable) ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeMap, ast.TypeString},
+ ReturnType: ast.TypeString,
+ Variadic: true,
+ VariadicType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ defaultValue := ""
+ defaultValueSet := false
+ if len(args) > 2 {
+ defaultValue = args[2].(string)
+ defaultValueSet = true
+ }
+ if len(args) > 3 {
+ return "", fmt.Errorf("lookup() takes no more than three arguments")
+ }
+ index := args[1].(string)
+ mapVar := args[0].(map[string]ast.Variable)
+
+ v, ok := mapVar[index]
+ if !ok {
+ if defaultValueSet {
+ return defaultValue, nil
+ } else {
+ return "", fmt.Errorf(
+ "lookup failed to find '%s'",
+ args[1].(string))
+ }
+ }
+ if v.Type != ast.TypeString {
+ return nil, fmt.Errorf(
+ "lookup() may only be used with flat maps, this map contains elements of %s",
+ v.Type.Printable())
+ }
+
+ return v.Value.(string), nil
+ },
+ }
+}
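+
+// For example (illustrative values):
+//
+//   ${lookup(map("k", "v"), "k")}      => "v"
+//   ${lookup(map("k", "v"), "q", "d")} => "d"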
+
+// interpolationFuncElement implements the "element" function that allows
+// a specific index to be looked up in a multi-variable value. Note that this will
+// wrap if the index is larger than the number of elements in the multi-variable value.
+func interpolationFuncElement() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList, ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ list := args[0].([]ast.Variable)
+ if len(list) == 0 {
+ return nil, fmt.Errorf("element() may not be used with an empty list")
+ }
+
+ index, err := strconv.Atoi(args[1].(string))
+ if err != nil || index < 0 {
+ return "", fmt.Errorf(
+ "invalid number for index, got %s", args[1])
+ }
+
+ resolvedIndex := index % len(list)
+
+ v := list[resolvedIndex]
+ if v.Type != ast.TypeString {
+ return nil, fmt.Errorf(
+ "element() may only be used with flat lists, this list contains elements of %s",
+ v.Type.Printable())
+ }
+ return v.Value, nil
+ },
+ }
+}
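+
+// For example (illustrative values; the index wraps around):
+//
+//   ${element(list("a", "b", "c"), "3")} => "a"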
+
+// interpolationFuncKeys implements the "keys" function that yields a list of
+// keys of map types within a Terraform configuration.
+func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeMap},
+ ReturnType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ mapVar := args[0].(map[string]ast.Variable)
+ keys := make([]string, 0)
+
+ for k := range mapVar {
+ keys = append(keys, k)
+ }
+
+ sort.Strings(keys)
+
+ // Keys are guaranteed to be strings
+ return stringSliceToVariableValue(keys), nil
+ },
+ }
+}
+
+// interpolationFuncValues implements the "values" function that yields a list of
+// values of map types within a Terraform configuration, ordered by key.
+func interpolationFuncValues(vs map[string]ast.Variable) ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeMap},
+ ReturnType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ mapVar := args[0].(map[string]ast.Variable)
+ keys := make([]string, 0)
+
+ for k := range mapVar {
+ keys = append(keys, k)
+ }
+
+ sort.Strings(keys)
+
+ values := make([]string, len(keys))
+ for index, key := range keys {
+ if value, ok := mapVar[key].Value.(string); ok {
+ values[index] = value
+ } else {
+ return "", fmt.Errorf("values(): %q has element with bad type %s",
+ key, mapVar[key].Type)
+ }
+ }
+
+ variable, err := hil.InterfaceToVariable(values)
+ if err != nil {
+ return nil, err
+ }
+
+ return variable.Value, nil
+ },
+ }
+}
+
+// interpolationFuncBasename implements the "basename" function.
+func interpolationFuncBasename() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return filepath.Base(args[0].(string)), nil
+ },
+ }
+}
+
+// interpolationFuncBase64Encode implements the "base64encode" function that
+// allows Base64 encoding.
+func interpolationFuncBase64Encode() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ return base64.StdEncoding.EncodeToString([]byte(s)), nil
+ },
+ }
+}
+
+// interpolationFuncBase64Decode implements the "base64decode" function that
+// allows Base64 decoding.
+func interpolationFuncBase64Decode() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ sDec, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ return "", fmt.Errorf("failed to decode base64 data '%s'", s)
+ }
+ return string(sDec), nil
+ },
+ }
+}
+
+// interpolationFuncLower implements the "lower" function that does
+// string lower casing.
+func interpolationFuncLower() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ toLower := args[0].(string)
+ return strings.ToLower(toLower), nil
+ },
+ }
+}
+
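+// interpolationFuncMd5 implements the "md5" function that returns the
+// hexadecimal MD5 hash of the given string.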
+func interpolationFuncMd5() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ h := md5.New()
+ h.Write([]byte(s))
+ hash := hex.EncodeToString(h.Sum(nil))
+ return hash, nil
+ },
+ }
+}
+
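+// interpolationFuncMerge implements the "merge" function that merges
+// multiple maps into a single map, with later arguments overriding earlier
+// ones on duplicate keys.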
+func interpolationFuncMerge() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeMap},
+ ReturnType: ast.TypeMap,
+ Variadic: true,
+ VariadicType: ast.TypeMap,
+ Callback: func(args []interface{}) (interface{}, error) {
+ outputMap := make(map[string]ast.Variable)
+
+ for _, arg := range args {
+ for k, v := range arg.(map[string]ast.Variable) {
+ outputMap[k] = v
+ }
+ }
+
+ return outputMap, nil
+ },
+ }
+}
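+
+// For example (illustrative values; later maps win on duplicate keys):
+//
+//   ${merge(map("a", "1"), map("a", "2", "b", "3"))} => {a = "2", b = "3"}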
+
+// interpolationFuncUpper implements the "upper" function that does
+// string upper casing.
+func interpolationFuncUpper() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ toUpper := args[0].(string)
+ return strings.ToUpper(toUpper), nil
+ },
+ }
+}
+
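+// interpolationFuncSha1 implements the "sha1" function that returns the
+// hexadecimal SHA-1 hash of the given string.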
+func interpolationFuncSha1() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ h := sha1.New()
+ h.Write([]byte(s))
+ hash := hex.EncodeToString(h.Sum(nil))
+ return hash, nil
+ },
+ }
+}
+
+// interpolationFuncSha256 implements the "sha256" function that returns the
+// hexadecimal representation of the SHA-256 sum of the given string.
+func interpolationFuncSha256() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ h := sha256.New()
+ h.Write([]byte(s))
+ hash := hex.EncodeToString(h.Sum(nil))
+ return hash, nil
+ },
+ }
+}
+
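+// interpolationFuncTrimSpace implements the "trimspace" function that
+// removes leading and trailing whitespace from the given string.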
+func interpolationFuncTrimSpace() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ trimSpace := args[0].(string)
+ return strings.TrimSpace(trimSpace), nil
+ },
+ }
+}
+
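+// interpolationFuncBase64Sha256 implements the "base64sha256" function that
+// returns the SHA-256 hash of the given string, encoded with standard Base64.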
+func interpolationFuncBase64Sha256() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ h := sha256.New()
+ h.Write([]byte(s))
+ shaSum := h.Sum(nil)
+ encoded := base64.StdEncoding.EncodeToString(shaSum[:])
+ return encoded, nil
+ },
+ }
+}
+
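+// interpolationFuncUUID implements the "uuid" function that returns a
+// random UUID string.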
+func interpolationFuncUUID() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return uuid.GenerateUUID()
+ },
+ }
+}
+
+// interpolationFuncTimestamp implements the "timestamp" function that returns
+// the current UTC time as an RFC 3339 formatted string.
+func interpolationFuncTimestamp() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return time.Now().UTC().Format(time.RFC3339), nil
+ },
+ }
+}
+
+// interpolationFuncTitle implements the "title" function that returns a copy of the
+// string in which the first character of each word is capitalized.
+func interpolationFuncTitle() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ toTitle := args[0].(string)
+ return strings.Title(toTitle), nil
+ },
+ }
+}
+
+// interpolationFuncSubstr implements the "substr" function that allows strings
+// to be truncated.
+func interpolationFuncSubstr() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{
+ ast.TypeString, // input string
+ ast.TypeInt, // offset
+ ast.TypeInt, // length
+ },
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ str := args[0].(string)
+ offset := args[1].(int)
+ length := args[2].(int)
+
+ // Interpret a negative offset as being equivalent to a positive
+ // offset taken from the end of the string.
+ if offset < 0 {
+ offset += len(str)
+ }
+
+ // Interpret a length of `-1` as indicating that the substring
+ // should start at `offset` and continue until the end of the
+ // string. Any other negative length (other than `-1`) is invalid.
+ if length == -1 {
+ length = len(str)
+ } else if length >= 0 {
+ length += offset
+ } else {
+ return nil, fmt.Errorf("length should be a non-negative integer")
+ }
+
+ if offset > len(str) {
+ return nil, fmt.Errorf("offset cannot be larger than the length of the string")
+ }
+
+ if length > len(str) {
+ return nil, fmt.Errorf("'offset + length' cannot be larger than the length of the string")
+ }
+
+ return str[offset:length], nil
+ },
+ }
+}
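+
+// For example (illustrative values):
+//
+//   ${substr("hello world", 1, 4)} => "ello"
+//   ${substr("hello", -3, -1)}     => "llo"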
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
new file mode 100644
index 00000000..ead3d102
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
@@ -0,0 +1,283 @@
+package config
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/hashicorp/hil"
+ "github.com/hashicorp/hil/ast"
+ "github.com/mitchellh/reflectwalk"
+)
+
+// interpolationWalker implements interfaces for the reflectwalk package
+// (github.com/mitchellh/reflectwalk) that can be used to automatically
+// execute a callback for an interpolation.
+type interpolationWalker struct {
+ // F is the function to call for every interpolation. It can be nil.
+ //
+ // If Replace is true, then the return value of F will be used to
+ // replace the interpolation.
+ F interpolationWalkerFunc
+ Replace bool
+
+ // ContextF is an advanced version of F that also receives the
+ // location of where it is in the structure. This lets you do
+ // context-aware validation.
+ ContextF interpolationWalkerContextFunc
+
+ key []string
+ lastValue reflect.Value
+ loc reflectwalk.Location
+ cs []reflect.Value
+ csKey []reflect.Value
+ csData interface{}
+ sliceIndex []int
+ unknownKeys []string
+}
+
+// interpolationWalkerFunc is the callback called by interpolationWalk.
+// It is called with any interpolation found. It should return a value
+// to replace the interpolation with, along with any errors.
+//
+// If Replace is set to false in interpolationWalker, then the replace
+// value can be anything as it will have no effect.
+type interpolationWalkerFunc func(ast.Node) (interface{}, error)
+
+// interpolationWalkerContextFunc is called by interpolationWalk if
+// ContextF is set. This receives both the interpolation and the location
+// where the interpolation is.
+//
+// This callback can be used to validate the location of the interpolation
+// within the configuration.
+type interpolationWalkerContextFunc func(reflectwalk.Location, ast.Node)
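+
+// A minimal usage sketch (illustrative; rawValue stands in for any decoded
+// configuration value):
+//
+//   w := &interpolationWalker{
+//       F: func(n ast.Node) (interface{}, error) {
+//           // inspect or replace the interpolation node here
+//           return nil, nil
+//       },
+//   }
+//   err := reflectwalk.Walk(rawValue, w)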
+
+func (w *interpolationWalker) Enter(loc reflectwalk.Location) error {
+ w.loc = loc
+ return nil
+}
+
+func (w *interpolationWalker) Exit(loc reflectwalk.Location) error {
+ w.loc = reflectwalk.None
+
+ switch loc {
+ case reflectwalk.Map:
+ w.cs = w.cs[:len(w.cs)-1]
+ case reflectwalk.MapValue:
+ w.key = w.key[:len(w.key)-1]
+ w.csKey = w.csKey[:len(w.csKey)-1]
+ case reflectwalk.Slice:
+ // Split any values that need to be split
+ w.splitSlice()
+ w.cs = w.cs[:len(w.cs)-1]
+ case reflectwalk.SliceElem:
+ w.csKey = w.csKey[:len(w.csKey)-1]
+ w.sliceIndex = w.sliceIndex[:len(w.sliceIndex)-1]
+ }
+
+ return nil
+}
+
+func (w *interpolationWalker) Map(m reflect.Value) error {
+ w.cs = append(w.cs, m)
+ return nil
+}
+
+func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error {
+ w.csData = k
+ w.csKey = append(w.csKey, k)
+
+ if l := len(w.sliceIndex); l > 0 {
+ w.key = append(w.key, fmt.Sprintf("%d.%s", w.sliceIndex[l-1], k.String()))
+ } else {
+ w.key = append(w.key, k.String())
+ }
+
+ w.lastValue = v
+ return nil
+}
+
+func (w *interpolationWalker) Slice(s reflect.Value) error {
+ w.cs = append(w.cs, s)
+ return nil
+}
+
+func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error {
+ w.csKey = append(w.csKey, reflect.ValueOf(i))
+ w.sliceIndex = append(w.sliceIndex, i)
+ return nil
+}
+
+func (w *interpolationWalker) Primitive(v reflect.Value) error {
+ setV := v
+
+ // We only care about strings
+ if v.Kind() == reflect.Interface {
+ setV = v
+ v = v.Elem()
+ }
+ if v.Kind() != reflect.String {
+ return nil
+ }
+
+ astRoot, err := hil.Parse(v.String())
+ if err != nil {
+ return err
+ }
+
+ // If the AST we got is just a literal string value with the same
+ // value then we ignore it. We have to check if it's the same value
+ // because it is possible to input a string, get out a string, and
+ // have it be different. For example: "foo-$${bar}" turns into
+ // "foo-${bar}"
+ if n, ok := astRoot.(*ast.LiteralNode); ok {
+ if s, ok := n.Value.(string); ok && s == v.String() {
+ return nil
+ }
+ }
+
+ if w.ContextF != nil {
+ w.ContextF(w.loc, astRoot)
+ }
+
+ if w.F == nil {
+ return nil
+ }
+
+ replaceVal, err := w.F(astRoot)
+ if err != nil {
+ return fmt.Errorf(
+ "%s in:\n\n%s",
+ err, v.String())
+ }
+
+ if w.Replace {
+ // We need to determine if we need to remove this element
+ // if the result contains any "UnknownVariableValue" which is
+ // set if it is computed. This behavior is different if we're
+ // splitting (in a SliceElem) or not.
+ remove := false
+ if w.loc == reflectwalk.SliceElem {
+ switch typedReplaceVal := replaceVal.(type) {
+ case string:
+ if typedReplaceVal == UnknownVariableValue {
+ remove = true
+ }
+ case []interface{}:
+ if hasUnknownValue(typedReplaceVal) {
+ remove = true
+ }
+ }
+ } else if replaceVal == UnknownVariableValue {
+ remove = true
+ }
+
+ if remove {
+ w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, "."))
+ }
+
+ resultVal := reflect.ValueOf(replaceVal)
+ switch w.loc {
+ case reflectwalk.MapKey:
+ m := w.cs[len(w.cs)-1]
+
+ // Delete the old value
+ var zero reflect.Value
+ m.SetMapIndex(w.csData.(reflect.Value), zero)
+
+ // Set the new key with the existing value
+ m.SetMapIndex(resultVal, w.lastValue)
+
+ // Set the key to be the new key
+ w.csData = resultVal
+ case reflectwalk.MapValue:
+ // If we're in a map, then the only way to set a map value is
+ // to set it directly.
+ m := w.cs[len(w.cs)-1]
+ mk := w.csData.(reflect.Value)
+ m.SetMapIndex(mk, resultVal)
+ default:
+ // Otherwise, we should be addressable
+ setV.Set(resultVal)
+ }
+ }
+
+ return nil
+}
+
+func (w *interpolationWalker) replaceCurrent(v reflect.Value) {
+ // if we don't have at least 2 values, we're not going to find a map, and
+ // indexing below would panic.
+ if len(w.cs) < 2 {
+ return
+ }
+
+ c := w.cs[len(w.cs)-2]
+ switch c.Kind() {
+ case reflect.Map:
+ // Get the key and delete it
+ k := w.csKey[len(w.csKey)-1]
+ c.SetMapIndex(k, v)
+ }
+}
+
+func hasUnknownValue(variable []interface{}) bool {
+ for _, value := range variable {
+ if strVal, ok := value.(string); ok {
+ if strVal == UnknownVariableValue {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func (w *interpolationWalker) splitSlice() {
+ raw := w.cs[len(w.cs)-1]
+
+ var s []interface{}
+ switch v := raw.Interface().(type) {
+ case []interface{}:
+ s = v
+ case []map[string]interface{}:
+ return
+ }
+
+ split := false
+ for _, val := range s {
+ if varVal, ok := val.(ast.Variable); ok && varVal.Type == ast.TypeList {
+ split = true
+ }
+ if _, ok := val.([]interface{}); ok {
+ split = true
+ }
+ }
+
+ if !split {
+ return
+ }
+
+ result := make([]interface{}, 0)
+ for _, v := range s {
+ switch val := v.(type) {
+ case ast.Variable:
+ switch val.Type {
+ case ast.TypeList:
+ elements := val.Value.([]ast.Variable)
+ for _, element := range elements {
+ result = append(result, element.Value)
+ }
+ default:
+ result = append(result, val.Value)
+ }
+ case []interface{}:
+ for _, element := range val {
+ result = append(result, element)
+ }
+ default:
+ result = append(result, v)
+ }
+ }
+
+ w.replaceCurrent(reflect.ValueOf(result))
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/lang.go b/vendor/github.com/hashicorp/terraform/config/lang.go
new file mode 100644
index 00000000..890d30be
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/lang.go
@@ -0,0 +1,11 @@
+package config
+
+import (
+ "github.com/hashicorp/hil/ast"
+)
+
+type noopNode struct{}
+
+func (n *noopNode) Accept(ast.Visitor) ast.Node { return n }
+func (n *noopNode) Pos() ast.Pos { return ast.Pos{} }
+func (n *noopNode) Type(ast.Scope) (ast.Type, error) { return ast.TypeString, nil }
diff --git a/vendor/github.com/hashicorp/terraform/config/loader.go b/vendor/github.com/hashicorp/terraform/config/loader.go
new file mode 100644
index 00000000..0bfa89c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/loader.go
@@ -0,0 +1,224 @@
+package config
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/hashicorp/hcl"
+)
+
+// ErrNoConfigsFound is the error returned by LoadDir if no
+// Terraform configuration files were found in the given directory.
+type ErrNoConfigsFound struct {
+ Dir string
+}
+
+func (e ErrNoConfigsFound) Error() string {
+ return fmt.Sprintf(
+ "No Terraform configuration files found in directory: %s",
+ e.Dir)
+}
+
+// LoadJSON loads a single Terraform configuration from a given JSON document.
+//
+// The document must be a complete Terraform configuration. This function will
+// NOT try to load any additional modules so only the given document is loaded.
+func LoadJSON(raw json.RawMessage) (*Config, error) {
+ obj, err := hcl.Parse(string(raw))
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing JSON document as HCL: %s", err)
+ }
+
+ // Start building the result
+ hclConfig := &hclConfigurable{
+ Root: obj,
+ }
+
+ return hclConfig.Config()
+}
+
+// LoadFile loads the Terraform configuration from a given file.
+//
+// This file can be in any format that Terraform recognizes, and may import
+// any other format that Terraform recognizes.
+func LoadFile(path string) (*Config, error) {
+ importTree, err := loadTree(path)
+ if err != nil {
+ return nil, err
+ }
+
+ configTree, err := importTree.ConfigTree()
+
+ // Close the importTree now so that we can clear resources as quickly
+ // as possible.
+ importTree.Close()
+
+ if err != nil {
+ return nil, err
+ }
+
+ return configTree.Flatten()
+}
+
+// LoadDir loads all the Terraform configuration files in a single
+// directory and appends them together.
+//
+// Special files known as "override files" can also be present, which
+// are merged into the loaded configuration. That is, the non-override
+// files are loaded first to create the configuration. Then, the overrides
+// are merged into the configuration to create the final configuration.
+//
+// Files are loaded in lexical order.
+func LoadDir(root string) (*Config, error) {
+ files, overrides, err := dirFiles(root)
+ if err != nil {
+ return nil, err
+ }
+ if len(files) == 0 {
+ return nil, &ErrNoConfigsFound{Dir: root}
+ }
+
+ // Determine the absolute path to the directory.
+ rootAbs, err := filepath.Abs(root)
+ if err != nil {
+ return nil, err
+ }
+
+ var result *Config
+
+ // Sort the files and overrides so we have a deterministic order
+ sort.Strings(files)
+ sort.Strings(overrides)
+
+ // Load all the regular files, append them to each other.
+ for _, f := range files {
+ c, err := LoadFile(f)
+ if err != nil {
+ return nil, err
+ }
+
+ if result != nil {
+ result, err = Append(result, c)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ result = c
+ }
+ }
+
+ // Load all the overrides, and merge them into the config
+ for _, f := range overrides {
+ c, err := LoadFile(f)
+ if err != nil {
+ return nil, err
+ }
+
+ result, err = Merge(result, c)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Mark the directory
+ result.Dir = rootAbs
+
+ return result, nil
+}
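+
+// A minimal usage sketch (illustrative path):
+//
+//   cfg, err := LoadDir("./infra")
+//   if err != nil {
+//       // handle ErrNoConfigsFound and other errors
+//   }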
+
+// IsEmptyDir returns true if the directory given has no Terraform
+// configuration files.
+func IsEmptyDir(root string) (bool, error) {
+ if _, err := os.Stat(root); err != nil && os.IsNotExist(err) {
+ return true, nil
+ }
+
+ fs, os, err := dirFiles(root)
+ if err != nil {
+ return false, err
+ }
+
+ return len(fs) == 0 && len(os) == 0, nil
+}
+
+// Ext returns the Terraform configuration extension of the given
+// path, or a blank string if it is an invalid function.
+func ext(path string) string {
+ if strings.HasSuffix(path, ".tf") {
+ return ".tf"
+ } else if strings.HasSuffix(path, ".tf.json") {
+ return ".tf.json"
+ } else {
+ return ""
+ }
+}
+
+func dirFiles(dir string) ([]string, []string, error) {
+ f, err := os.Open(dir)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer f.Close()
+
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, nil, err
+ }
+ if !fi.IsDir() {
+ return nil, nil, fmt.Errorf(
+ "configuration path must be a directory: %s",
+ dir)
+ }
+
+ var files, overrides []string
+ err = nil
+ for err != io.EOF {
+ var fis []os.FileInfo
+ fis, err = f.Readdir(128)
+ if err != nil && err != io.EOF {
+ return nil, nil, err
+ }
+
+ for _, fi := range fis {
+ // Ignore directories
+ if fi.IsDir() {
+ continue
+ }
+
+ // Only care about files that are valid to load
+ name := fi.Name()
+ extValue := ext(name)
+ if extValue == "" || isIgnoredFile(name) {
+ continue
+ }
+
+ // Determine if we're dealing with an override
+ nameNoExt := name[:len(name)-len(extValue)]
+ override := nameNoExt == "override" ||
+ strings.HasSuffix(nameNoExt, "_override")
+
+ path := filepath.Join(dir, name)
+ if override {
+ overrides = append(overrides, path)
+ } else {
+ files = append(files, path)
+ }
+ }
+ }
+
+ return files, overrides, nil
+}
+
+// isIgnoredFile returns true if the provided file name is a file
+// that should be ignored.
+func isIgnoredFile(name string) bool {
+ return strings.HasPrefix(name, ".") || // Unix-like hidden files
+ strings.HasSuffix(name, "~") || // vim
+ strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go
new file mode 100644
index 00000000..a40ad5ba
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go
@@ -0,0 +1,1091 @@
+package config
+
+import (
+ "fmt"
+ "io/ioutil"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/hcl"
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/mitchellh/mapstructure"
+)
+
+// hclConfigurable is an implementation of configurable that knows
+// how to turn HCL configuration into a *Config object.
+type hclConfigurable struct {
+ File string
+ Root *ast.File
+}
+
+func (t *hclConfigurable) Config() (*Config, error) {
+ validKeys := map[string]struct{}{
+ "atlas": struct{}{},
+ "data": struct{}{},
+ "module": struct{}{},
+ "output": struct{}{},
+ "provider": struct{}{},
+ "resource": struct{}{},
+ "terraform": struct{}{},
+ "variable": struct{}{},
+ }
+
+ // Top-level item should be the object list
+ list, ok := t.Root.Node.(*ast.ObjectList)
+ if !ok {
+ return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
+ }
+
+ // Start building up the actual configuration.
+ config := new(Config)
+
+ // Terraform config
+ if o := list.Filter("terraform"); len(o.Items) > 0 {
+ var err error
+ config.Terraform, err = loadTerraformHcl(o)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Build the variables
+ if vars := list.Filter("variable"); len(vars.Items) > 0 {
+ var err error
+ config.Variables, err = loadVariablesHcl(vars)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Get Atlas configuration
+ if atlas := list.Filter("atlas"); len(atlas.Items) > 0 {
+ var err error
+ config.Atlas, err = loadAtlasHcl(atlas)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Build the modules
+ if modules := list.Filter("module"); len(modules.Items) > 0 {
+ var err error
+ config.Modules, err = loadModulesHcl(modules)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Build the provider configs
+ if providers := list.Filter("provider"); len(providers.Items) > 0 {
+ var err error
+ config.ProviderConfigs, err = loadProvidersHcl(providers)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Build the resources
+ {
+ var err error
+ managedResourceConfigs := list.Filter("resource")
+ dataResourceConfigs := list.Filter("data")
+
+ config.Resources = make(
+ []*Resource, 0,
+ len(managedResourceConfigs.Items)+len(dataResourceConfigs.Items),
+ )
+
+ managedResources, err := loadManagedResourcesHcl(managedResourceConfigs)
+ if err != nil {
+ return nil, err
+ }
+ dataResources, err := loadDataResourcesHcl(dataResourceConfigs)
+ if err != nil {
+ return nil, err
+ }
+
+ config.Resources = append(config.Resources, dataResources...)
+ config.Resources = append(config.Resources, managedResources...)
+ }
+
+ // Build the outputs
+ if outputs := list.Filter("output"); len(outputs.Items) > 0 {
+ var err error
+ config.Outputs, err = loadOutputsHcl(outputs)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Check for invalid keys
+ for _, item := range list.Items {
+ if len(item.Keys) == 0 {
+ // Not sure how this would happen, but let's avoid a panic
+ continue
+ }
+
+ k := item.Keys[0].Token.Value().(string)
+ if _, ok := validKeys[k]; ok {
+ continue
+ }
+
+ config.unknownKeys = append(config.unknownKeys, k)
+ }
+
+ return config, nil
+}
+
+// loadFileHcl is a fileLoaderFunc that knows how to read HCL
+// files and turn them into hclConfigurables.
+func loadFileHcl(root string) (configurable, []string, error) {
+ // Read the HCL file and prepare for parsing
+ d, err := ioutil.ReadFile(root)
+ if err != nil {
+ return nil, nil, fmt.Errorf(
+ "Error reading %s: %s", root, err)
+ }
+
+ // Parse it
+ hclRoot, err := hcl.Parse(string(d))
+ if err != nil {
+ return nil, nil, fmt.Errorf(
+ "Error parsing %s: %s", root, err)
+ }
+
+ // Start building the result
+ result := &hclConfigurable{
+ File: root,
+ Root: hclRoot,
+ }
+
+ // Dive in, find the imports. This is disabled for now since
+ // imports were removed prior to Terraform 0.1. The code remains
+ // here, commented out, for historical purposes.
+ /*
+ imports := obj.Get("import")
+ if imports == nil {
+ result.Object.Ref()
+ return result, nil, nil
+ }
+
+ if imports.Type() != libucl.ObjectTypeString {
+ imports.Close()
+
+ return nil, nil, fmt.Errorf(
+ "Error in %s: all 'import' declarations should be in the format\n"+
+ "`import \"foo\"` (Got type %s)",
+ root,
+ imports.Type())
+ }
+
+ // Gather all the import paths
+ importPaths := make([]string, 0, imports.Len())
+ iter := imports.Iterate(false)
+ for imp := iter.Next(); imp != nil; imp = iter.Next() {
+ path := imp.ToString()
+ if !filepath.IsAbs(path) {
+ // Relative paths are relative to the Terraform file itself
+ dir := filepath.Dir(root)
+ path = filepath.Join(dir, path)
+ }
+
+ importPaths = append(importPaths, path)
+ imp.Close()
+ }
+ iter.Close()
+ imports.Close()
+
+ result.Object.Ref()
+ */
+
+ return result, nil, nil
+}
+
+// Given a handle to a HCL object, this transforms it into the Terraform config
+func loadTerraformHcl(list *ast.ObjectList) (*Terraform, error) {
+ if len(list.Items) > 1 {
+ return nil, fmt.Errorf("only one 'terraform' block allowed per module")
+ }
+
+ // Get our one item
+ item := list.Items[0]
+
+ // This block should have an empty top level ObjectItem. If there are keys
+ // here, it's likely because we have a flattened JSON object, and we can
+ // lift this into a nested ObjectList to decode properly.
+ if len(item.Keys) > 0 {
+ item = &ast.ObjectItem{
+ Val: &ast.ObjectType{
+ List: &ast.ObjectList{
+ Items: []*ast.ObjectItem{item},
+ },
+ },
+ }
+ }
+
+ // We need the item value as an ObjectList
+ var listVal *ast.ObjectList
+ if ot, ok := item.Val.(*ast.ObjectType); ok {
+ listVal = ot.List
+ } else {
+ return nil, fmt.Errorf("terraform block: should be an object")
+ }
+
+ // NOTE: We purposely don't validate unknown HCL keys here so that
+ // we can potentially read _future_ Terraform version config (to
+ // still be able to validate the required version).
+ //
+ // We should still keep track of unknown keys to validate later, but
+ // HCL doesn't currently support that.
+
+ var config Terraform
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, fmt.Errorf(
+ "Error reading terraform config: %s",
+ err)
+ }
+
+ // If we have a backend block, then parse it out
+ if os := listVal.Filter("backend"); len(os.Items) > 0 {
+ var err error
+ config.Backend, err = loadTerraformBackendHcl(os)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading backend config for terraform block: %s",
+ err)
+ }
+ }
+
+ return &config, nil
+}
+
+// Loads the Backend configuration from an object list.
+func loadTerraformBackendHcl(list *ast.ObjectList) (*Backend, error) {
+ if len(list.Items) > 1 {
+ return nil, fmt.Errorf("only one 'backend' block allowed")
+ }
+
+ // Get our one item
+ item := list.Items[0]
+
+ // Verify the keys
+ if len(item.Keys) != 1 {
+ return nil, fmt.Errorf(
+ "position %s: 'backend' must be followed by exactly one string: a type",
+ item.Pos())
+ }
+
+ typ := item.Keys[0].Token.Value().(string)
+
+ // Decode the raw config
+ var config map[string]interface{}
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, fmt.Errorf(
+ "Error reading backend config: %s",
+ err)
+ }
+
+ rawConfig, err := NewRawConfig(config)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading backend config: %s",
+ err)
+ }
+
+ b := &Backend{
+ Type: typ,
+ RawConfig: rawConfig,
+ }
+ b.Hash = b.Rehash()
+
+ return b, nil
+}
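+
+// For example, a matching configuration block (illustrative values):
+//
+//   terraform {
+//       backend "s3" {
+//           bucket = "example-bucket"
+//       }
+//   }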
+
+// Given a handle to a HCL object, this transforms it into the Atlas
+// configuration.
+func loadAtlasHcl(list *ast.ObjectList) (*AtlasConfig, error) {
+ if len(list.Items) > 1 {
+ return nil, fmt.Errorf("only one 'atlas' block allowed")
+ }
+
+ // Get our one item
+ item := list.Items[0]
+
+ var config AtlasConfig
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, fmt.Errorf(
+ "Error reading atlas config: %s",
+ err)
+ }
+
+ return &config, nil
+}
+
+// Given a handle to a HCL object, this recurses into the structure
+// and pulls out a list of modules.
+//
+// The resulting modules may not be unique, but each module
+// represents exactly one module definition in the HCL configuration.
+// We leave it up to another pass to merge them together.
+func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
+ list = list.Children()
+ if len(list.Items) == 0 {
+ return nil, nil
+ }
+
+ // Where all the results will go
+ var result []*Module
+
+ // Now go over all the types and their children in order to get
+ // all of the actual resources.
+ for _, item := range list.Items {
+ k := item.Keys[0].Token.Value().(string)
+
+ var listVal *ast.ObjectList
+ if ot, ok := item.Val.(*ast.ObjectType); ok {
+ listVal = ot.List
+ } else {
+ return nil, fmt.Errorf("module '%s': should be an object", k)
+ }
+
+ var config map[string]interface{}
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, fmt.Errorf(
+ "Error reading config for %s: %s",
+ k,
+ err)
+ }
+
+ // Remove the fields we handle specially
+ delete(config, "source")
+
+ rawConfig, err := NewRawConfig(config)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading config for %s: %s",
+ k,
+ err)
+ }
+
+ // If we have a source, then parse it out
+ var source string
+ if o := listVal.Filter("source"); len(o.Items) > 0 {
+ err = hcl.DecodeObject(&source, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing source for %s: %s",
+ k,
+ err)
+ }
+ }
+
+ result = append(result, &Module{
+ Name: k,
+ Source: source,
+ RawConfig: rawConfig,
+ })
+ }
+
+ return result, nil
+}
+
+// loadOutputsHcl recurses into the given HCL object and turns
+// it into a mapping of outputs.
+func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) {
+ list = list.Children()
+ if len(list.Items) == 0 {
+ return nil, fmt.Errorf(
+ "'output' must be followed by exactly one string: a name")
+ }
+
+ // Go through each object and turn it into an actual result.
+ result := make([]*Output, 0, len(list.Items))
+ for _, item := range list.Items {
+ n := item.Keys[0].Token.Value().(string)
+
+ var listVal *ast.ObjectList
+ if ot, ok := item.Val.(*ast.ObjectType); ok {
+ listVal = ot.List
+ } else {
+ return nil, fmt.Errorf("output '%s': should be an object", n)
+ }
+
+ var config map[string]interface{}
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, err
+ }
+
+ // Delete special keys
+ delete(config, "depends_on")
+
+ rawConfig, err := NewRawConfig(config)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading config for output %s: %s",
+ n,
+ err)
+ }
+
+ // If we have depends fields, then add those in
+ var dependsOn []string
+ if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
+ err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading depends_on for output %q: %s",
+ n,
+ err)
+ }
+ }
+
+ result = append(result, &Output{
+ Name: n,
+ RawConfig: rawConfig,
+ DependsOn: dependsOn,
+ })
+ }
+
+ return result, nil
+}
+
+// loadVariablesHcl recurses into the given HCL object and turns
+// it into a list of variables.
+func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) {
+ list = list.Children()
+ if len(list.Items) == 0 {
+ return nil, fmt.Errorf(
+ "'variable' must be followed by exactly one strings: a name")
+ }
+
+ // hclVariable is the structure each variable is decoded into
+ type hclVariable struct {
+ DeclaredType string `hcl:"type"`
+ Default interface{}
+ Description string
+ Fields []string `hcl:",decodedFields"`
+ }
+
+ // Go through each object and turn it into an actual result.
+ result := make([]*Variable, 0, len(list.Items))
+ for _, item := range list.Items {
+ // Clean up items from JSON
+ unwrapHCLObjectKeysFromJSON(item, 1)
+
+ // Verify the keys
+ if len(item.Keys) != 1 {
+ return nil, fmt.Errorf(
+ "position %s: 'variable' must be followed by exactly one strings: a name",
+ item.Pos())
+ }
+
+ n := item.Keys[0].Token.Value().(string)
+ if !NameRegexp.MatchString(n) {
+ return nil, fmt.Errorf(
+ "position %s: 'variable' name must match regular expression: %s",
+ item.Pos(), NameRegexp)
+ }
+
+ // Check for invalid keys
+ valid := []string{"type", "default", "description"}
+ if err := checkHCLKeys(item.Val, valid); err != nil {
+ return nil, multierror.Prefix(err, fmt.Sprintf(
+ "variable[%s]:", n))
+ }
+
+ // Decode into hclVariable to get typed values
+ var hclVar hclVariable
+ if err := hcl.DecodeObject(&hclVar, item.Val); err != nil {
+ return nil, err
+ }
+
+ // Defaults turn into a slice of map[string]interface{} and
+ // we need to make sure to convert that down into the
+ // proper type for Config.
+ if ms, ok := hclVar.Default.([]map[string]interface{}); ok {
+ def := make(map[string]interface{})
+ for _, m := range ms {
+ for k, v := range m {
+ def[k] = v
+ }
+ }
+
+ hclVar.Default = def
+ }
+
+ // Build the new variable and do some basic validation
+ newVar := &Variable{
+ Name: n,
+ DeclaredType: hclVar.DeclaredType,
+ Default: hclVar.Default,
+ Description: hclVar.Description,
+ }
+ if err := newVar.ValidateTypeAndDefault(); err != nil {
+ return nil, err
+ }
+
+ result = append(result, newVar)
+ }
+
+ return result, nil
+}
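+
+// For example, a matching variable block (illustrative values):
+//
+//   variable "region" {
+//       type        = "string"
+//       default     = "us-east-1"
+//       description = "Region to deploy into"
+//   }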
+
+// loadProvidersHcl recurses into the given HCL object and turns
+// it into a mapping of provider configs.
+func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
+ list = list.Children()
+ if len(list.Items) == 0 {
+ return nil, nil
+ }
+
+ // Go through each object and turn it into an actual result.
+ result := make([]*ProviderConfig, 0, len(list.Items))
+ for _, item := range list.Items {
+ n := item.Keys[0].Token.Value().(string)
+
+ var listVal *ast.ObjectList
+ if ot, ok := item.Val.(*ast.ObjectType); ok {
+ listVal = ot.List
+ } else {
+ return nil, fmt.Errorf("module '%s': should be an object", n)
+ }
+
+ var config map[string]interface{}
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, err
+ }
+
+ delete(config, "alias")
+
+ rawConfig, err := NewRawConfig(config)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading config for provider config %s: %s",
+ n,
+ err)
+ }
+
+ // If we have an alias field, then add those in
+ var alias string
+ if a := listVal.Filter("alias"); len(a.Items) > 0 {
+ err := hcl.DecodeObject(&alias, a.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading alias for provider[%s]: %s",
+ n,
+ err)
+ }
+ }
+
+ result = append(result, &ProviderConfig{
+ Name: n,
+ Alias: alias,
+ RawConfig: rawConfig,
+ })
+ }
+
+ return result, nil
+}
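+
+// Illustrative only (hypothetical names): a provider block with an alias,
+// the one key this loader handles specially, might look like:
+//
+//     provider "aws" {
+//       alias  = "west"
+//       region = "us-west-2"
+//     }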
+
+// Given a handle to an HCL object, this recurses into the structure
+// and pulls out a list of data sources.
+//
+// The resulting data sources may not be unique, but each one
+// represents exactly one data definition in the HCL configuration.
+// We leave it up to another pass to merge them together.
+func loadDataResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
+ list = list.Children()
+ if len(list.Items) == 0 {
+ return nil, nil
+ }
+
+ // Where all the results will go
+ var result []*Resource
+
+ // Now go over all the types and their children in order to get
+ // all of the actual resources.
+ for _, item := range list.Items {
+ if len(item.Keys) != 2 {
+ return nil, fmt.Errorf(
+ "position %s: 'data' must be followed by exactly two strings: a type and a name",
+ item.Pos())
+ }
+
+ t := item.Keys[0].Token.Value().(string)
+ k := item.Keys[1].Token.Value().(string)
+
+ var listVal *ast.ObjectList
+ if ot, ok := item.Val.(*ast.ObjectType); ok {
+ listVal = ot.List
+ } else {
+ return nil, fmt.Errorf("data sources %s[%s]: should be an object", t, k)
+ }
+
+ var config map[string]interface{}
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, fmt.Errorf(
+ "Error reading config for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+
+ // Remove the fields we handle specially
+ delete(config, "depends_on")
+ delete(config, "provider")
+ delete(config, "count")
+
+ rawConfig, err := NewRawConfig(config)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading config for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+
+ // If we have a count, then figure it out
+ var count string = "1"
+ if o := listVal.Filter("count"); len(o.Items) > 0 {
+ err = hcl.DecodeObject(&count, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing count for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+ countConfig, err := NewRawConfig(map[string]interface{}{
+ "count": count,
+ })
+ if err != nil {
+ return nil, err
+ }
+ countConfig.Key = "count"
+
+ // If we have depends fields, then add those in
+ var dependsOn []string
+ if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
+ err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading depends_on for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+
+ // If we have a provider, then parse it out
+ var provider string
+ if o := listVal.Filter("provider"); len(o.Items) > 0 {
+ err := hcl.DecodeObject(&provider, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading provider for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+
+ result = append(result, &Resource{
+ Mode: DataResourceMode,
+ Name: k,
+ Type: t,
+ RawCount: countConfig,
+ RawConfig: rawConfig,
+ Provider: provider,
+ Provisioners: []*Provisioner{},
+ DependsOn: dependsOn,
+ Lifecycle: ResourceLifecycle{},
+ })
+ }
+
+ return result, nil
+}
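+
+// Illustrative only (hypothetical names): a data block exercising the
+// specially handled count, provider and depends_on keys might look like:
+//
+//     data "aws_ami" "ubuntu" {
+//       most_recent = true
+//       count       = 1
+//       provider    = "aws.west"
+//       depends_on  = ["aws_vpc.main"]
+//     }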
+
+// Given a handle to an HCL object, this recurses into the structure
+// and pulls out a list of managed resources.
+//
+// The resulting resources may not be unique, but each resource
+// represents exactly one "resource" block in the HCL configuration.
+// We leave it up to another pass to merge them together.
+func loadManagedResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
+ list = list.Children()
+ if len(list.Items) == 0 {
+ return nil, nil
+ }
+
+ // Where all the results will go
+ var result []*Resource
+
+ // Now go over all the types and their children in order to get
+ // all of the actual resources.
+ for _, item := range list.Items {
+ // GH-4385: We detect a pure provisioner resource and give the user
+ // an error about how to do it cleanly.
+ if len(item.Keys) == 4 && item.Keys[2].Token.Value().(string) == "provisioner" {
+ return nil, fmt.Errorf(
+ "position %s: provisioners in a resource should be wrapped in a list\n\n"+
+ "Example: \"provisioner\": [ { \"local-exec\": ... } ]",
+ item.Pos())
+ }
+
+ // Fix up JSON input
+ unwrapHCLObjectKeysFromJSON(item, 2)
+
+ if len(item.Keys) != 2 {
+ return nil, fmt.Errorf(
+ "position %s: resource must be followed by exactly two strings, a type and a name",
+ item.Pos())
+ }
+
+ t := item.Keys[0].Token.Value().(string)
+ k := item.Keys[1].Token.Value().(string)
+
+ var listVal *ast.ObjectList
+ if ot, ok := item.Val.(*ast.ObjectType); ok {
+ listVal = ot.List
+ } else {
+ return nil, fmt.Errorf("resources %s[%s]: should be an object", t, k)
+ }
+
+ var config map[string]interface{}
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, fmt.Errorf(
+ "Error reading config for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+
+ // Remove the fields we handle specially
+ delete(config, "connection")
+ delete(config, "count")
+ delete(config, "depends_on")
+ delete(config, "provisioner")
+ delete(config, "provider")
+ delete(config, "lifecycle")
+
+ rawConfig, err := NewRawConfig(config)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading config for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+
+ // If we have a count, then figure it out
+ var count string = "1"
+ if o := listVal.Filter("count"); len(o.Items) > 0 {
+ err = hcl.DecodeObject(&count, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing count for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+ countConfig, err := NewRawConfig(map[string]interface{}{
+ "count": count,
+ })
+ if err != nil {
+ return nil, err
+ }
+ countConfig.Key = "count"
+
+ // If we have depends fields, then add those in
+ var dependsOn []string
+ if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
+ err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading depends_on for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+
+ // If we have connection info, then parse those out
+ var connInfo map[string]interface{}
+ if o := listVal.Filter("connection"); len(o.Items) > 0 {
+ err := hcl.DecodeObject(&connInfo, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading connection info for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+
+ // If we have provisioners, then parse those out
+ var provisioners []*Provisioner
+ if os := listVal.Filter("provisioner"); len(os.Items) > 0 {
+ var err error
+ provisioners, err = loadProvisionersHcl(os, connInfo)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading provisioners for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+
+ // If we have a provider, then parse it out
+ var provider string
+ if o := listVal.Filter("provider"); len(o.Items) > 0 {
+ err := hcl.DecodeObject(&provider, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading provider for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+
+ // Check if the resource should be re-created before
+ // destroying the existing instance
+ var lifecycle ResourceLifecycle
+ if o := listVal.Filter("lifecycle"); len(o.Items) > 0 {
+ if len(o.Items) > 1 {
+ return nil, fmt.Errorf(
+ "%s[%s]: Multiple lifecycle blocks found, expected one",
+ t, k)
+ }
+
+ // Check for invalid keys
+ valid := []string{"create_before_destroy", "ignore_changes", "prevent_destroy"}
+ if err := checkHCLKeys(o.Items[0].Val, valid); err != nil {
+ return nil, multierror.Prefix(err, fmt.Sprintf(
+ "%s[%s]:", t, k))
+ }
+
+ var raw map[string]interface{}
+ if err = hcl.DecodeObject(&raw, o.Items[0].Val); err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing lifecycle for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+
+ if err := mapstructure.WeakDecode(raw, &lifecycle); err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing lifecycle for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+
+ result = append(result, &Resource{
+ Mode: ManagedResourceMode,
+ Name: k,
+ Type: t,
+ RawCount: countConfig,
+ RawConfig: rawConfig,
+ Provisioners: provisioners,
+ Provider: provider,
+ DependsOn: dependsOn,
+ Lifecycle: lifecycle,
+ })
+ }
+
+ return result, nil
+}
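+
+// Illustrative only (hypothetical names): a resource block exercising the
+// specially handled keys parsed above might look like:
+//
+//     resource "aws_instance" "web" {
+//       ami           = "ami-123456"
+//       instance_type = "t2.micro"
+//       count         = 2
+//       provider      = "aws.west"
+//       depends_on    = ["aws_vpc.main"]
+//
+//       lifecycle {
+//         create_before_destroy = true
+//       }
+//     }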
+
+func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) {
+ list = list.Children()
+ if len(list.Items) == 0 {
+ return nil, nil
+ }
+
+ // Go through each object and turn it into an actual result.
+ result := make([]*Provisioner, 0, len(list.Items))
+ for _, item := range list.Items {
+ n := item.Keys[0].Token.Value().(string)
+
+ var listVal *ast.ObjectList
+ if ot, ok := item.Val.(*ast.ObjectType); ok {
+ listVal = ot.List
+ } else {
+ return nil, fmt.Errorf("provisioner '%s': should be an object", n)
+ }
+
+ var config map[string]interface{}
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, err
+ }
+
+ // Parse the "when" value
+ when := ProvisionerWhenCreate
+ if v, ok := config["when"]; ok {
+ switch v {
+ case "create":
+ when = ProvisionerWhenCreate
+ case "destroy":
+ when = ProvisionerWhenDestroy
+ default:
+ return nil, fmt.Errorf(
+ "position %s: 'provisioner' when must be 'create' or 'destroy'",
+ item.Pos())
+ }
+ }
+
+ // Parse the "on_failure" value
+ onFailure := ProvisionerOnFailureFail
+ if v, ok := config["on_failure"]; ok {
+ switch v {
+ case "continue":
+ onFailure = ProvisionerOnFailureContinue
+ case "fail":
+ onFailure = ProvisionerOnFailureFail
+ default:
+ return nil, fmt.Errorf(
+ "position %s: 'provisioner' on_failure must be 'continue' or 'fail'",
+ item.Pos())
+ }
+ }
+
+ // Delete fields we special case
+ delete(config, "connection")
+ delete(config, "when")
+ delete(config, "on_failure")
+
+ rawConfig, err := NewRawConfig(config)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check if we have a provisioner-level connection
+ // block that overrides the resource-level
+ var subConnInfo map[string]interface{}
+ if o := listVal.Filter("connection"); len(o.Items) > 0 {
+ err := hcl.DecodeObject(&subConnInfo, o.Items[0].Val)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Inherit from the resource connInfo any keys
+ // that are not explicitly overridden.
+ if connInfo != nil && subConnInfo != nil {
+ for k, v := range connInfo {
+ if _, ok := subConnInfo[k]; !ok {
+ subConnInfo[k] = v
+ }
+ }
+ } else if subConnInfo == nil {
+ subConnInfo = connInfo
+ }
+
+ // Parse the connInfo
+ connRaw, err := NewRawConfig(subConnInfo)
+ if err != nil {
+ return nil, err
+ }
+
+ result = append(result, &Provisioner{
+ Type: n,
+ RawConfig: rawConfig,
+ ConnInfo: connRaw,
+ When: when,
+ OnFailure: onFailure,
+ })
+ }
+
+ return result, nil
+}
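+
+// Illustrative only (hypothetical names): a provisioner block using the
+// when, on_failure and connection keys handled above might look like:
+//
+//     provisioner "remote-exec" {
+//       inline     = ["echo hello"]
+//       when       = "create"
+//       on_failure = "continue"
+//
+//       connection {
+//         user = "ubuntu"
+//       }
+//     }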
+
+/*
+func hclObjectMap(os *hclobj.Object) map[string]ast.ListNode {
+ objects := make(map[string][]*hclobj.Object)
+
+ for _, o := range os.Elem(false) {
+ for _, elem := range o.Elem(true) {
+ val, ok := objects[elem.Key]
+ if !ok {
+ val = make([]*hclobj.Object, 0, 1)
+ }
+
+ val = append(val, elem)
+ objects[elem.Key] = val
+ }
+ }
+
+ return objects
+}
+*/
+
+func checkHCLKeys(node ast.Node, valid []string) error {
+ var list *ast.ObjectList
+ switch n := node.(type) {
+ case *ast.ObjectList:
+ list = n
+ case *ast.ObjectType:
+ list = n.List
+ default:
+ return fmt.Errorf("cannot check HCL keys of type %T", n)
+ }
+
+ validMap := make(map[string]struct{}, len(valid))
+ for _, v := range valid {
+ validMap[v] = struct{}{}
+ }
+
+ var result error
+ for _, item := range list.Items {
+ key := item.Keys[0].Token.Value().(string)
+ if _, ok := validMap[key]; !ok {
+ result = multierror.Append(result, fmt.Errorf(
+ "invalid key: %s", key))
+ }
+ }
+
+ return result
+}
+
+// unwrapHCLObjectKeysFromJSON cleans up an edge case that can occur when
+// parsing JSON as input: if we're parsing JSON then directly nested
+// items will show up as additional "keys".
+//
+// For objects that expect a fixed number of keys, this breaks the
+// decoding process. This function unwraps the object into what it would've
+// looked like if it came directly from HCL by specifying the number of keys
+// you expect.
+//
+// Example:
+//
+// { "foo": { "baz": {} } }
+//
+// Will show up with Keys being: []string{"foo", "baz"}
+// when we really just want the keys up to the requested depth
+// (here, just "foo"). This function will fix this.
+func unwrapHCLObjectKeysFromJSON(item *ast.ObjectItem, depth int) {
+ if len(item.Keys) > depth && item.Keys[0].Token.JSON {
+ for len(item.Keys) > depth {
+ // Pop off the last key
+ n := len(item.Keys)
+ key := item.Keys[n-1]
+ item.Keys[n-1] = nil
+ item.Keys = item.Keys[:n-1]
+
+ // Wrap our value in a list
+ item.Val = &ast.ObjectType{
+ List: &ast.ObjectList{
+ Items: []*ast.ObjectItem{
+ &ast.ObjectItem{
+ Keys: []*ast.ObjectKey{key},
+ Val: item.Val,
+ },
+ },
+ },
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/merge.go b/vendor/github.com/hashicorp/terraform/config/merge.go
new file mode 100644
index 00000000..db214be4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/merge.go
@@ -0,0 +1,193 @@
+package config
+
+// Merge merges two configurations into a single configuration.
+//
+// Merge allows for the two configurations to have duplicate resources,
+// because the resources will be merged. This differs from a single
+// Config which must only have unique resources.
+func Merge(c1, c2 *Config) (*Config, error) {
+ c := new(Config)
+
+ // Merge unknown keys
+ unknowns := make(map[string]struct{})
+ for _, k := range c1.unknownKeys {
+ _, present := unknowns[k]
+ if !present {
+ unknowns[k] = struct{}{}
+ c.unknownKeys = append(c.unknownKeys, k)
+ }
+ }
+ for _, k := range c2.unknownKeys {
+ _, present := unknowns[k]
+ if !present {
+ unknowns[k] = struct{}{}
+ c.unknownKeys = append(c.unknownKeys, k)
+ }
+ }
+
+ // Merge Atlas configuration. This is a dumb merge: one simply
+ // overrides the other.
+ c.Atlas = c1.Atlas
+ if c2.Atlas != nil {
+ c.Atlas = c2.Atlas
+ }
+
+ // Merge the Terraform configuration
+ if c1.Terraform != nil {
+ c.Terraform = c1.Terraform
+ if c2.Terraform != nil {
+ c.Terraform.Merge(c2.Terraform)
+ }
+ } else {
+ c.Terraform = c2.Terraform
+ }
+
+ // NOTE: Everything below is pretty gross. Due to the lack of generics
+ // in Go, there is some hoop-jumping involved to make this merging a
+ // little more test-friendly and less repetitive. Ironically, making it
+ // less repetitive involves being a little repetitive, but I prefer to
+ // be repetitive with things that are less error prone than things that
+ // are more error prone (more logic). Type conversions to an interface
+ // are pretty low-error.
+
+ var m1, m2, mresult []merger
+
+ // Modules
+ m1 = make([]merger, 0, len(c1.Modules))
+ m2 = make([]merger, 0, len(c2.Modules))
+ for _, v := range c1.Modules {
+ m1 = append(m1, v)
+ }
+ for _, v := range c2.Modules {
+ m2 = append(m2, v)
+ }
+ mresult = mergeSlice(m1, m2)
+ if len(mresult) > 0 {
+ c.Modules = make([]*Module, len(mresult))
+ for i, v := range mresult {
+ c.Modules[i] = v.(*Module)
+ }
+ }
+
+ // Outputs
+ m1 = make([]merger, 0, len(c1.Outputs))
+ m2 = make([]merger, 0, len(c2.Outputs))
+ for _, v := range c1.Outputs {
+ m1 = append(m1, v)
+ }
+ for _, v := range c2.Outputs {
+ m2 = append(m2, v)
+ }
+ mresult = mergeSlice(m1, m2)
+ if len(mresult) > 0 {
+ c.Outputs = make([]*Output, len(mresult))
+ for i, v := range mresult {
+ c.Outputs[i] = v.(*Output)
+ }
+ }
+
+ // Provider Configs
+ m1 = make([]merger, 0, len(c1.ProviderConfigs))
+ m2 = make([]merger, 0, len(c2.ProviderConfigs))
+ for _, v := range c1.ProviderConfigs {
+ m1 = append(m1, v)
+ }
+ for _, v := range c2.ProviderConfigs {
+ m2 = append(m2, v)
+ }
+ mresult = mergeSlice(m1, m2)
+ if len(mresult) > 0 {
+ c.ProviderConfigs = make([]*ProviderConfig, len(mresult))
+ for i, v := range mresult {
+ c.ProviderConfigs[i] = v.(*ProviderConfig)
+ }
+ }
+
+ // Resources
+ m1 = make([]merger, 0, len(c1.Resources))
+ m2 = make([]merger, 0, len(c2.Resources))
+ for _, v := range c1.Resources {
+ m1 = append(m1, v)
+ }
+ for _, v := range c2.Resources {
+ m2 = append(m2, v)
+ }
+ mresult = mergeSlice(m1, m2)
+ if len(mresult) > 0 {
+ c.Resources = make([]*Resource, len(mresult))
+ for i, v := range mresult {
+ c.Resources[i] = v.(*Resource)
+ }
+ }
+
+ // Variables
+ m1 = make([]merger, 0, len(c1.Variables))
+ m2 = make([]merger, 0, len(c2.Variables))
+ for _, v := range c1.Variables {
+ m1 = append(m1, v)
+ }
+ for _, v := range c2.Variables {
+ m2 = append(m2, v)
+ }
+ mresult = mergeSlice(m1, m2)
+ if len(mresult) > 0 {
+ c.Variables = make([]*Variable, len(mresult))
+ for i, v := range mresult {
+ c.Variables[i] = v.(*Variable)
+ }
+ }
+
+ return c, nil
+}
+
+// merger is an interface that must be implemented by types that are
+// merge-able. This simplifies the implementation of Merge for the various
+// components of a Config.
+type merger interface {
+ mergerName() string
+ mergerMerge(merger) merger
+}
+
+// mergeSlice merges a slice of mergers.
+func mergeSlice(m1, m2 []merger) []merger {
+ r := make([]merger, len(m1), len(m1)+len(m2))
+ copy(r, m1)
+
+ m := map[string]struct{}{}
+ for _, v2 := range m2 {
+ // If we already saw it, just append it because it's a
+ // duplicate and invalid...
+ name := v2.mergerName()
+ if _, ok := m[name]; ok {
+ r = append(r, v2)
+ continue
+ }
+ m[name] = struct{}{}
+
+ // Find an original to override
+ var original merger
+ originalIndex := -1
+ for i, v := range m1 {
+ if v.mergerName() == name {
+ originalIndex = i
+ original = v
+ break
+ }
+ }
+
+ var v merger
+ if original == nil {
+ v = v2
+ } else {
+ v = original.mergerMerge(v2)
+ }
+
+ if originalIndex == -1 {
+ r = append(r, v)
+ } else {
+ r[originalIndex] = v
+ }
+ }
+
+ return r
+}
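+
+// A minimal sketch of how a type could satisfy merger (hypothetical; the
+// real implementations live on Module, Output, ProviderConfig, Resource
+// and Variable elsewhere in this package):
+//
+//     func (o *Output) mergerName() string { return o.Name }
+//
+//     func (o *Output) mergerMerge(m merger) merger {
+//         o2 := m.(*Output)
+//         result := *o
+//         result.RawConfig = result.RawConfig.merge(o2.RawConfig)
+//         return &result
+//     }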
diff --git a/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go b/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go
new file mode 100644
index 00000000..095f61d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go
@@ -0,0 +1,114 @@
+package module
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// copyDir copies the src directory contents into dst. Both directories
+// should already exist.
+func copyDir(dst, src string) error {
+ src, err := filepath.EvalSymlinks(src)
+ if err != nil {
+ return err
+ }
+
+ walkFn := func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if path == src {
+ return nil
+ }
+
+ if strings.HasPrefix(filepath.Base(path), ".") {
+ // Skip any dot files
+ if info.IsDir() {
+ return filepath.SkipDir
+ } else {
+ return nil
+ }
+ }
+
+ // The "path" has the src prefixed to it. We need to join our
+ // destination with the path without the src on it.
+ dstPath := filepath.Join(dst, path[len(src):])
+
+ // We don't want to try to copy the same file over itself.
+ if eq, err := sameFile(path, dstPath); eq {
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ // If we have a directory, make that subdirectory, then continue
+ // the walk.
+ if info.IsDir() {
+ if path == filepath.Join(src, dst) {
+ // dst is in src; don't walk it.
+ return nil
+ }
+
+ if err := os.MkdirAll(dstPath, 0755); err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ // If we have a file, copy the contents.
+ srcF, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ dstF, err := os.Create(dstPath)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ if _, err := io.Copy(dstF, srcF); err != nil {
+ return err
+ }
+
+ // Chmod it
+ return os.Chmod(dstPath, info.Mode())
+ }
+
+ return filepath.Walk(src, walkFn)
+}
+
+// sameFile tries to determine if two paths are the same file.
+// If the paths don't match, we look up the inode on supported systems.
+func sameFile(a, b string) (bool, error) {
+ if a == b {
+ return true, nil
+ }
+
+ aIno, err := inode(a)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+ }
+
+ bIno, err := inode(b)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+ }
+
+ if aIno > 0 && aIno == bIno {
+ return true, nil
+ }
+
+ return false, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/get.go b/vendor/github.com/hashicorp/terraform/config/module/get.go
new file mode 100644
index 00000000..96b4a63c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/get.go
@@ -0,0 +1,71 @@
+package module
+
+import (
+ "io/ioutil"
+ "os"
+
+ "github.com/hashicorp/go-getter"
+)
+
+// GetMode is an enum that describes how modules are loaded.
+//
+// GetModeNone says that modules will not be downloaded or updated; they will
+// only be loaded from storage.
+//
+// GetModeGet says that modules can be initially downloaded if they don't
+// exist, but otherwise to just load from the current version in storage.
+//
+// GetModeUpdate says that modules should be checked for updates and
+// downloaded prior to loading. If there are no updates, we load the version
+// from disk, otherwise we download first and then load.
+type GetMode byte
+
+const (
+ GetModeNone GetMode = iota
+ GetModeGet
+ GetModeUpdate
+)
+
+// GetCopy is the same as Get except that it downloads a copy of the
+// module represented by source.
+//
+// This copy will omit any dot-prefixed files (such as .git/ or .hg/) and
+// can't be updated on its own.
+func GetCopy(dst, src string) error {
+ // Create the temporary directory to do the real Get to
+ tmpDir, err := ioutil.TempDir("", "tf")
+ if err != nil {
+ return err
+ }
+ // FIXME: This isn't completely safe. Creating and then removing our temp
+ // path leaves a window in which an attacker could race us to inject files.
+ if err := os.RemoveAll(tmpDir); err != nil {
+ return err
+ }
+ defer os.RemoveAll(tmpDir)
+
+ // Get to that temporary dir
+ if err := getter.Get(tmpDir, src); err != nil {
+ return err
+ }
+
+ // Make sure the destination exists
+ if err := os.MkdirAll(dst, 0755); err != nil {
+ return err
+ }
+
+ // Copy to the final location
+ return copyDir(dst, tmpDir)
+}
+
+func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) {
+ // Get the module with the level specified if we were told to.
+ if mode > GetModeNone {
+ if err := s.Get(key, src, mode == GetModeUpdate); err != nil {
+ return "", false, err
+ }
+ }
+
+ // Get the directory where the module is.
+ return s.Dir(key)
+}
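+
+// Illustrative usage of GetCopy (hypothetical destination and source):
+//
+//     // Fetch a module into ./module-copy, omitting dot-prefixed files.
+//     if err := GetCopy("./module-copy", "git::https://example.com/mod.git"); err != nil {
+//         // handle the error
+//     }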
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode.go b/vendor/github.com/hashicorp/terraform/config/module/inode.go
new file mode 100644
index 00000000..8603ee26
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/inode.go
@@ -0,0 +1,21 @@
+// +build linux darwin openbsd netbsd solaris
+
+package module
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+)
+
+// inode looks up the inode of a file on POSIX systems.
+func inode(path string) (uint64, error) {
+ stat, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ if st, ok := stat.Sys().(*syscall.Stat_t); ok {
+ return st.Ino, nil
+ }
+ return 0, fmt.Errorf("could not determine file inode")
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go
new file mode 100644
index 00000000..0d95730d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go
@@ -0,0 +1,21 @@
+// +build freebsd
+
+package module
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+)
+
+// inode looks up the inode of a file on FreeBSD.
+func inode(path string) (uint64, error) {
+ stat, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ if st, ok := stat.Sys().(*syscall.Stat_t); ok {
+ return uint64(st.Ino), nil
+ }
+ return 0, fmt.Errorf("could not determine file inode")
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go b/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go
new file mode 100644
index 00000000..c0cf4553
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go
@@ -0,0 +1,8 @@
+// +build windows
+
+package module
+
+// inode always returns 0 on Windows, which has no syscall.Stat_t.
+func inode(path string) (uint64, error) {
+ return 0, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/module.go b/vendor/github.com/hashicorp/terraform/config/module/module.go
new file mode 100644
index 00000000..f8649f6e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/module.go
@@ -0,0 +1,7 @@
+package module
+
+// Module represents the metadata for a single module.
+type Module struct {
+ Name string
+ Source string
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/testing.go b/vendor/github.com/hashicorp/terraform/config/module/testing.go
new file mode 100644
index 00000000..fc9e7331
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/testing.go
@@ -0,0 +1,38 @@
+package module
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/hashicorp/go-getter"
+)
+
+// TestTree loads a module at the given path and returns the tree as well
+// as a function that should be deferred to clean up resources.
+func TestTree(t *testing.T, path string) (*Tree, func()) {
+ // Create a temporary directory for module storage
+ dir, err := ioutil.TempDir("", "tf")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ return nil, nil
+ }
+
+ // Load the module
+ mod, err := NewTreeModule("", path)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ return nil, nil
+ }
+
+ // Get the child modules
+ s := &getter.FolderStorage{StorageDir: dir}
+ if err := mod.Load(s, GetModeGet); err != nil {
+ t.Fatalf("err: %s", err)
+ return nil, nil
+ }
+
+ return mod, func() {
+ os.RemoveAll(dir)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree.go b/vendor/github.com/hashicorp/terraform/config/module/tree.go
new file mode 100644
index 00000000..b6f90fd9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/tree.go
@@ -0,0 +1,428 @@
+package module
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/go-getter"
+ "github.com/hashicorp/terraform/config"
+)
+
+// RootName is the name of the root tree.
+const RootName = "root"
+
+// Tree represents the module import tree of configurations.
+//
+// This Tree structure can be used to get (download) new modules, load
+// all the modules without getting, flatten the tree into something
+// Terraform can use, etc.
+type Tree struct {
+ name string
+ config *config.Config
+ children map[string]*Tree
+ path []string
+ lock sync.RWMutex
+}
+
+// NewTree returns a new Tree for the given config structure.
+func NewTree(name string, c *config.Config) *Tree {
+ return &Tree{config: c, name: name}
+}
+
+// NewEmptyTree returns a new tree that is empty (contains no configuration).
+func NewEmptyTree() *Tree {
+ t := &Tree{config: &config.Config{}}
+
+ // We do this dummy load so that the tree is marked as "loaded". It
+ // should never fail because this is essentially a no-op. If it does
+ // fail, we panic so we know it's a bug.
+ if err := t.Load(nil, GetModeGet); err != nil {
+ panic(err)
+ }
+
+ return t
+}
+
+// NewTreeModule is like NewTree except it parses the configuration in
+// the directory and gives it a specific name. Use a blank name "" to specify
+// the root module.
+func NewTreeModule(name, dir string) (*Tree, error) {
+ c, err := config.LoadDir(dir)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewTree(name, c), nil
+}
+
+// Config returns the configuration for this module.
+func (t *Tree) Config() *config.Config {
+ return t.config
+}
+
+// Child returns the child with the given path (by name).
+func (t *Tree) Child(path []string) *Tree {
+ if t == nil {
+ return nil
+ }
+
+ if len(path) == 0 {
+ return t
+ }
+
+ c := t.Children()[path[0]]
+ if c == nil {
+ return nil
+ }
+
+ return c.Child(path[1:])
+}
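+
+// Illustrative lookup (hypothetical module names): given a root tree whose
+// "network" module itself imports a "subnet" module, the nested tree can
+// be addressed as:
+//
+//     subnet := root.Child([]string{"network", "subnet"})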
+
+// Children returns the children of this tree (the modules that are
+// imported by this root).
+//
+// This will only return a non-nil value after Load is called.
+func (t *Tree) Children() map[string]*Tree {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+ return t.children
+}
+
+// Loaded says whether or not this tree has been loaded yet.
+func (t *Tree) Loaded() bool {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+ return t.children != nil
+}
+
+// Modules returns the list of modules that this tree imports.
+//
+// This is only the imports of _this_ level of the tree. To retrieve the
+// full nested imports, you'll have to traverse the tree.
+func (t *Tree) Modules() []*Module {
+ result := make([]*Module, len(t.config.Modules))
+ for i, m := range t.config.Modules {
+ result[i] = &Module{
+ Name: m.Name,
+ Source: m.Source,
+ }
+ }
+
+ return result
+}
+
+// Name returns the name of the tree. This will be "root" for the root
+// tree and the module name given for any children.
+func (t *Tree) Name() string {
+ if t.name == "" {
+ return RootName
+ }
+
+ return t.name
+}
+
+// Load loads the configuration of the entire tree.
+//
+// The parameters are used to tell the tree where to find modules and
+// whether it can download/update modules along the way.
+//
+// Calling this multiple times will reload the tree.
+//
+// Various semantic checks are made while loading, since module trees
+// inherently require the configuration to be in a reasonably sane state:
+// no circular dependencies, proper module sources, etc. A full
+// suite of validations can be done by running Validate (after loading).
+func (t *Tree) Load(s getter.Storage, mode GetMode) error {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ // Reset the children if we have any
+ t.children = nil
+
+ modules := t.Modules()
+ children := make(map[string]*Tree)
+
+ // Go through all the modules and get the directory for them.
+ for _, m := range modules {
+ if _, ok := children[m.Name]; ok {
+ return fmt.Errorf(
+ "module %s: duplicated. module names must be unique", m.Name)
+ }
+
+ // Determine the path to this child
+ path := make([]string, len(t.path), len(t.path)+1)
+ copy(path, t.path)
+ path = append(path, m.Name)
+
+ // Split out the subdir if we have one
+ source, subDir := getter.SourceDirSubdir(m.Source)
+
+ source, err := getter.Detect(source, t.config.Dir, getter.Detectors)
+ if err != nil {
+ return fmt.Errorf("module %s: %s", m.Name, err)
+ }
+
+ // Check if the detector introduced something new.
+ source, subDir2 := getter.SourceDirSubdir(source)
+ if subDir2 != "" {
+ subDir = filepath.Join(subDir2, subDir)
+ }
+
+ // Get the directory where this module is so we can load it
+ key := strings.Join(path, ".")
+ key = fmt.Sprintf("root.%s-%s", key, m.Source)
+ dir, ok, err := getStorage(s, key, source, mode)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return fmt.Errorf(
+ "module %s: not found, may need to be downloaded using 'terraform get'", m.Name)
+ }
+
+ // If we have a subdirectory, then merge that in
+ if subDir != "" {
+ dir = filepath.Join(dir, subDir)
+ }
+
+ // Load the configuration for this module.
+ children[m.Name], err = NewTreeModule(m.Name, dir)
+ if err != nil {
+ return fmt.Errorf(
+ "module %s: %s", m.Name, err)
+ }
+
+ // Set the path of this child
+ children[m.Name].path = path
+ }
+
+ // Go through all the children and load them.
+ for _, c := range children {
+ if err := c.Load(s, mode); err != nil {
+ return err
+ }
+ }
+
+ // Set our tree up
+ t.children = children
+
+ return nil
+}
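+
+// As a concrete illustration of the storage key built above (hypothetical
+// values): a module named "network" at the root with source
+// "./modules/network" is stored under the key:
+//
+//     "root.network-./modules/network"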
+
+// Path is the full path to this tree.
+func (t *Tree) Path() []string {
+ return t.path
+}
+
+// String gives a nice output to describe the tree.
+func (t *Tree) String() string {
+ var result bytes.Buffer
+ path := strings.Join(t.path, ", ")
+ if path != "" {
+ path = fmt.Sprintf(" (path: %s)", path)
+ }
+ result.WriteString(t.Name() + path + "\n")
+
+ cs := t.Children()
+ if cs == nil {
+ result.WriteString(" not loaded")
+ } else {
+ // Go through each child and get its string value, then indent it
+ // by two.
+ for _, c := range cs {
+ r := strings.NewReader(c.String())
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ result.WriteString(" ")
+ result.WriteString(scanner.Text())
+ result.WriteString("\n")
+ }
+ }
+ }
+
+ return result.String()
+}
+
+// Validate does semantic checks on the entire tree of configurations.
+//
+// This will call the respective config.Config.Validate() functions as well
+// as verifying things such as parameters/outputs between the various modules.
+//
+// Load must be called prior to calling Validate or an error will be returned.
+func (t *Tree) Validate() error {
+ if !t.Loaded() {
+ return fmt.Errorf("tree must be loaded before calling Validate")
+ }
+
+ // If something goes wrong, here is our error template
+ newErr := &treeError{Name: []string{t.Name()}}
+
+ // Terraform core does not handle root module children named "root".
+ // We plan to fix this in the future but this bug was brought up in
+ // the middle of a release and we don't want to introduce wide-sweeping
+ // changes at this time.
+ if len(t.path) == 1 && t.name == "root" {
+ return fmt.Errorf("root module cannot contain module named 'root'")
+ }
+
+ // Validate our configuration first.
+ if err := t.config.Validate(); err != nil {
+ newErr.Add(err)
+ }
+
+ // If we're the root, we do extra validation. This validation usually
+ // requires the entire tree (since children don't have parent pointers).
+ if len(t.path) == 0 {
+ if err := t.validateProviderAlias(); err != nil {
+ newErr.Add(err)
+ }
+ }
+
+ // Get the child trees
+ children := t.Children()
+
+ // Validate all our children
+ for _, c := range children {
+ err := c.Validate()
+ if err == nil {
+ continue
+ }
+
+ verr, ok := err.(*treeError)
+ if !ok {
+ // Unknown error, just return...
+ return err
+ }
+
+ // Append ourselves to the error and then return
+ verr.Name = append(verr.Name, t.Name())
+ newErr.AddChild(verr)
+ }
+
+ // Go over all the modules and verify that any parameters are valid
+ // variables into the module in question.
+ for _, m := range t.config.Modules {
+ tree, ok := children[m.Name]
+ if !ok {
+ // This should never happen because Load builds the children map
+ // from these same modules.
+ panic("module not found in children: " + m.Name)
+ }
+
+ // Build the variables that the module defines
+ requiredMap := make(map[string]struct{})
+ varMap := make(map[string]struct{})
+ for _, v := range tree.config.Variables {
+ varMap[v.Name] = struct{}{}
+
+ if v.Required() {
+ requiredMap[v.Name] = struct{}{}
+ }
+ }
+
+ // Compare to the keys in our raw config for the module
+ for k := range m.RawConfig.Raw {
+ if _, ok := varMap[k]; !ok {
+ newErr.Add(fmt.Errorf(
+ "module %s: %s is not a valid parameter",
+ m.Name, k))
+ }
+
+ // Remove the required
+ delete(requiredMap, k)
+ }
+
+ // If we have any required left over, they aren't set.
+ for k := range requiredMap {
+ newErr.Add(fmt.Errorf(
+ "module %s: required variable %q not set",
+ m.Name, k))
+ }
+ }
+
+ // Go over all the variables used and make sure that any module
+ // variables represent outputs properly.
+ for source, vs := range t.config.InterpolatedVariables() {
+ for _, v := range vs {
+ mv, ok := v.(*config.ModuleVariable)
+ if !ok {
+ continue
+ }
+
+ tree, ok := children[mv.Name]
+ if !ok {
+ newErr.Add(fmt.Errorf(
+ "%s: undefined module referenced %s",
+ source, mv.Name))
+ continue
+ }
+
+ found := false
+ for _, o := range tree.config.Outputs {
+ if o.Name == mv.Field {
+ found = true
+ break
+ }
+ }
+ if !found {
+ newErr.Add(fmt.Errorf(
+ "%s: %s is not a valid output for module %s",
+ source, mv.Field, mv.Name))
+ }
+ }
+ }
+
+ return newErr.ErrOrNil()
+}
+
+// treeError is an error used by Tree.Validate to accumulate all
+// validation errors.
+type treeError struct {
+ Name []string
+ Errs []error
+ Children []*treeError
+}
+
+func (e *treeError) Add(err error) {
+ e.Errs = append(e.Errs, err)
+}
+
+func (e *treeError) AddChild(err *treeError) {
+ e.Children = append(e.Children, err)
+}
+
+func (e *treeError) ErrOrNil() error {
+ if len(e.Errs) > 0 || len(e.Children) > 0 {
+ return e
+ }
+ return nil
+}
+
+func (e *treeError) Error() string {
+ name := strings.Join(e.Name, ".")
+ var out bytes.Buffer
+ fmt.Fprintf(&out, "module %s: ", name)
+
+ if len(e.Errs) == 1 {
+ // single-line error
+ out.WriteString(e.Errs[0].Error())
+ } else {
+ // multi-line error
+ for _, err := range e.Errs {
+ fmt.Fprintf(&out, "\n %s", err)
+ }
+ }
+
+ if len(e.Children) > 0 {
+ // start the next error on a new line
+ out.WriteString("\n ")
+ }
+ for _, child := range e.Children {
+ out.WriteString(child.Error())
+ }
+
+ return out.String()
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go b/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go
new file mode 100644
index 00000000..fcd37f4e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go
@@ -0,0 +1,57 @@
+package module
+
+import (
+ "bytes"
+ "encoding/gob"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+func (t *Tree) GobDecode(bs []byte) error {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ // Decode the gob data
+ var data treeGob
+ dec := gob.NewDecoder(bytes.NewReader(bs))
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
+ // Set the fields
+ t.name = data.Name
+ t.config = data.Config
+ t.children = data.Children
+ t.path = data.Path
+
+ return nil
+}
+
+func (t *Tree) GobEncode() ([]byte, error) {
+ data := &treeGob{
+ Config: t.config,
+ Children: t.children,
+ Name: t.name,
+ Path: t.path,
+ }
+
+ var buf bytes.Buffer
+ enc := gob.NewEncoder(&buf)
+ if err := enc.Encode(data); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+// treeGob is used as a structure to Gob encode a tree.
+//
+// This structure is private so it can't be referenced outside the package,
+// but the fields are public, allowing Gob to properly encode it. When we
+// decode this, we are able to turn it into a Tree.
+type treeGob struct {
+ Config *config.Config
+ Children map[string]*Tree
+ Name string
+ Path []string
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
new file mode 100644
index 00000000..090d4f7e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
@@ -0,0 +1,118 @@
+package module
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// validateProviderAlias validates that all provider alias references are
+// defined at some point in the parent tree. This improves UX by catching
+// alias typos at the slight cost of requiring a declaration of usage. This
+// is usually a good tradeoff since not many aliases are used.
+func (t *Tree) validateProviderAlias() error {
+ // If we're not the root, don't perform this validation. We must be the
+ // root since we require full tree visibility.
+ if len(t.path) != 0 {
+ return nil
+ }
+
+ // We'll use a graph to keep track of defined aliases at each level.
+ // As long as a parent defines an alias, it is okay.
+ var g dag.AcyclicGraph
+ t.buildProviderAliasGraph(&g, nil)
+
+ // Go through the graph and check that the usage is all good.
+ var err error
+ for _, v := range g.Vertices() {
+ pv, ok := v.(*providerAliasVertex)
+ if !ok {
+ // This shouldn't happen, just ignore it.
+ continue
+ }
+
+ // If we're not using any aliases, fast track and just continue
+ if len(pv.Used) == 0 {
+ continue
+ }
+
+ // Grab the ancestors since we're going to have to check if our
+ // parents define any of our aliases.
+ var parents []*providerAliasVertex
+ ancestors, _ := g.Ancestors(v)
+ for _, raw := range ancestors.List() {
+ if pv, ok := raw.(*providerAliasVertex); ok {
+ parents = append(parents, pv)
+ }
+ }
+ for k := range pv.Used {
+ // Check if we define this
+ if _, ok := pv.Defined[k]; ok {
+ continue
+ }
+
+ // Check for a parent
+ found := false
+ for _, parent := range parents {
+ _, found = parent.Defined[k]
+ if found {
+ break
+ }
+ }
+ if found {
+ continue
+ }
+
+ // We didn't find the alias, error!
+ err = multierror.Append(err, fmt.Errorf(
+ "module %s: provider alias must be defined by the module or a parent: %s",
+ strings.Join(pv.Path, "."), k))
+ }
+ }
+
+ return err
+}
+
+func (t *Tree) buildProviderAliasGraph(g *dag.AcyclicGraph, parent dag.Vertex) {
+ // Add all our defined aliases
+ defined := make(map[string]struct{})
+ for _, p := range t.config.ProviderConfigs {
+ defined[p.FullName()] = struct{}{}
+ }
+
+ // Add all our used aliases
+ used := make(map[string]struct{})
+ for _, r := range t.config.Resources {
+ if r.Provider != "" {
+ used[r.Provider] = struct{}{}
+ }
+ }
+
+ // Add it to the graph
+ vertex := &providerAliasVertex{
+ Path: t.Path(),
+ Defined: defined,
+ Used: used,
+ }
+ g.Add(vertex)
+
+ // Connect to our parent if we have one
+ if parent != nil {
+ g.Connect(dag.BasicEdge(vertex, parent))
+ }
+
+ // Build all our children
+ for _, c := range t.Children() {
+ c.buildProviderAliasGraph(g, vertex)
+ }
+}
+
+// providerAliasVertex is the vertex for the graph that keeps track of
+// defined provider aliases.
+type providerAliasVertex struct {
+ Path []string
+ Defined map[string]struct{}
+ Used map[string]struct{}
+}
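+
+// Illustrative configuration (hypothetical names) that passes this
+// validation: a module may reference "aws.west" as long as it, or some
+// parent module, defines the aliased provider:
+//
+//     provider "aws" {
+//       alias = "west"
+//     }
+//
+//     resource "aws_instance" "web" {
+//       provider = "aws.west"
+//     }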
diff --git a/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go b/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go
new file mode 100644
index 00000000..00fd43fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go
@@ -0,0 +1,40 @@
+package config
+
+// ProvisionerWhen is an enum for valid values for when to run provisioners.
+type ProvisionerWhen int
+
+const (
+ ProvisionerWhenInvalid ProvisionerWhen = iota
+ ProvisionerWhenCreate
+ ProvisionerWhenDestroy
+)
+
+var provisionerWhenStrs = map[ProvisionerWhen]string{
+ ProvisionerWhenInvalid: "invalid",
+ ProvisionerWhenCreate: "create",
+ ProvisionerWhenDestroy: "destroy",
+}
+
+func (v ProvisionerWhen) String() string {
+ return provisionerWhenStrs[v]
+}
+
+// ProvisionerOnFailure is an enum for valid values for on_failure options
+// for provisioners.
+type ProvisionerOnFailure int
+
+const (
+ ProvisionerOnFailureInvalid ProvisionerOnFailure = iota
+ ProvisionerOnFailureContinue
+ ProvisionerOnFailureFail
+)
+
+var provisionerOnFailureStrs = map[ProvisionerOnFailure]string{
+ ProvisionerOnFailureInvalid: "invalid",
+ ProvisionerOnFailureContinue: "continue",
+ ProvisionerOnFailureFail: "fail",
+}
+
+func (v ProvisionerOnFailure) String() string {
+ return provisionerOnFailureStrs[v]
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/raw_config.go b/vendor/github.com/hashicorp/terraform/config/raw_config.go
new file mode 100644
index 00000000..f8498d85
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/raw_config.go
@@ -0,0 +1,335 @@
+package config
+
+import (
+ "bytes"
+ "encoding/gob"
+ "sync"
+
+ "github.com/hashicorp/hil"
+ "github.com/hashicorp/hil/ast"
+ "github.com/mitchellh/copystructure"
+ "github.com/mitchellh/reflectwalk"
+)
+
+// UnknownVariableValue is a sentinel value that can be used
+// to denote that the value of a variable is unknown at this time.
+// RawConfig uses this information to build up data about
+// unknown keys.
+const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
+
+// RawConfig is a structure that holds a piece of configuration
+// where the overall structure is unknown since it will be used
+// to configure a plugin or some other similar external component.
+//
+// RawConfigs can be interpolated with variables that come from
+// other resources, user variables, etc.
+//
+// RawConfig supports a query-like interface to request
+// information from deep within the structure.
+type RawConfig struct {
+ Key string
+ Raw map[string]interface{}
+ Interpolations []ast.Node
+ Variables map[string]InterpolatedVariable
+
+ lock sync.Mutex
+ config map[string]interface{}
+ unknownKeys []string
+}
+
+// NewRawConfig creates a new RawConfig structure and populates the
+// publicly readable struct fields.
+func NewRawConfig(raw map[string]interface{}) (*RawConfig, error) {
+ result := &RawConfig{Raw: raw}
+ if err := result.init(); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// RawMap returns a copy of the RawConfig.Raw map.
+func (r *RawConfig) RawMap() map[string]interface{} {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ m := make(map[string]interface{})
+ for k, v := range r.Raw {
+ m[k] = v
+ }
+ return m
+}
+
+// Copy returns a copy of this RawConfig, uninterpolated.
+func (r *RawConfig) Copy() *RawConfig {
+ if r == nil {
+ return nil
+ }
+
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ newRaw := make(map[string]interface{})
+ for k, v := range r.Raw {
+ newRaw[k] = v
+ }
+
+ result, err := NewRawConfig(newRaw)
+ if err != nil {
+ panic("copy failed: " + err.Error())
+ }
+
+ result.Key = r.Key
+ return result
+}
+
+// Value returns the value of the configuration if this configuration
+// has a Key set. If this does not have a Key set, nil will be returned.
+func (r *RawConfig) Value() interface{} {
+ if c := r.Config(); c != nil {
+ if v, ok := c[r.Key]; ok {
+ return v
+ }
+ }
+
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ return r.Raw[r.Key]
+}
+
+// Config returns the entire configuration with the variables
+// interpolated from any call to Interpolate.
+//
+// If any interpolated variables are unknown (value set to
+// UnknownVariableValue), the first non-container (map, slice, etc.) element
+// will be removed from the config. The keys of unknown variables
+// can be found using the UnknownKeys function.
+//
+// By pruning out unknown keys from the configuration, the raw
+// structure will always successfully decode into its ultimate
+// structure using something like mapstructure.
+func (r *RawConfig) Config() map[string]interface{} {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ return r.config
+}
+
+// Interpolate uses the given mapping of variable values and uses
+// those as the values to replace any variables in this raw
+// configuration.
+//
+// Any prior calls to Interpolate are replaced with this one.
+//
+// If a variable key is missing, this will panic.
+func (r *RawConfig) Interpolate(vs map[string]ast.Variable) error {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ config := langEvalConfig(vs)
+ return r.interpolate(func(root ast.Node) (interface{}, error) {
+ // None of the variables we need are computed, meaning we should
+ // be able to properly evaluate.
+ result, err := hil.Eval(root, config)
+ if err != nil {
+ return "", err
+ }
+
+ return result.Value, nil
+ })
+}
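+
+// A usage sketch (hypothetical values): interpolating a variable into a
+// raw configuration.
+//
+//     rc, _ := NewRawConfig(map[string]interface{}{
+//         "ami": "${var.ami}",
+//     })
+//     vars := map[string]ast.Variable{
+//         "var.ami": {Type: ast.TypeString, Value: "ami-123456"},
+//     }
+//     if err := rc.Interpolate(vars); err == nil {
+//         _ = rc.Config()["ami"] // "ami-123456"
+//     }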
+
+// Merge merges another RawConfig into this one (overriding any conflicting
+// values in this config) and returns a new config. The original config
+// is not modified.
+func (r *RawConfig) Merge(other *RawConfig) *RawConfig {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ // Merge the raw configurations
+ raw := make(map[string]interface{})
+ for k, v := range r.Raw {
+ raw[k] = v
+ }
+ for k, v := range other.Raw {
+ raw[k] = v
+ }
+
+ // Create the result
+ result, err := NewRawConfig(raw)
+ if err != nil {
+ panic(err)
+ }
+
+ // Merge the interpolated results
+ result.config = make(map[string]interface{})
+ for k, v := range r.config {
+ result.config[k] = v
+ }
+ for k, v := range other.config {
+ result.config[k] = v
+ }
+
+ // Build the unknown keys
+ if len(r.unknownKeys) > 0 || len(other.unknownKeys) > 0 {
+ unknownKeys := make(map[string]struct{})
+ for _, k := range r.unknownKeys {
+ unknownKeys[k] = struct{}{}
+ }
+ for _, k := range other.unknownKeys {
+ unknownKeys[k] = struct{}{}
+ }
+
+ result.unknownKeys = make([]string, 0, len(unknownKeys))
+ for k := range unknownKeys {
+ result.unknownKeys = append(result.unknownKeys, k)
+ }
+ }
+
+ return result
+}
+
+func (r *RawConfig) init() error {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ r.config = r.Raw
+ r.Interpolations = nil
+ r.Variables = nil
+
+ fn := func(node ast.Node) (interface{}, error) {
+ r.Interpolations = append(r.Interpolations, node)
+ vars, err := DetectVariables(node)
+ if err != nil {
+ return "", err
+ }
+
+ for _, v := range vars {
+ if r.Variables == nil {
+ r.Variables = make(map[string]InterpolatedVariable)
+ }
+
+ r.Variables[v.FullKey()] = v
+ }
+
+ return "", nil
+ }
+
+ walker := &interpolationWalker{F: fn}
+ if err := reflectwalk.Walk(r.Raw, walker); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error {
+ config, err := copystructure.Copy(r.Raw)
+ if err != nil {
+ return err
+ }
+ r.config = config.(map[string]interface{})
+
+ w := &interpolationWalker{F: fn, Replace: true}
+ err = reflectwalk.Walk(r.config, w)
+ if err != nil {
+ return err
+ }
+
+ r.unknownKeys = w.unknownKeys
+ return nil
+}
+
+func (r *RawConfig) merge(r2 *RawConfig) *RawConfig {
+ if r == nil && r2 == nil {
+ return nil
+ }
+
+ if r == nil {
+ r = &RawConfig{}
+ }
+
+ rawRaw, err := copystructure.Copy(r.Raw)
+ if err != nil {
+ panic(err)
+ }
+
+ raw := rawRaw.(map[string]interface{})
+ if r2 != nil {
+ for k, v := range r2.Raw {
+ raw[k] = v
+ }
+ }
+
+ result, err := NewRawConfig(raw)
+ if err != nil {
+ panic(err)
+ }
+
+ return result
+}
+
+// UnknownKeys returns the keys of the configuration that are unknown
+// because they had interpolated variables that must be computed.
+func (r *RawConfig) UnknownKeys() []string {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ return r.unknownKeys
+}
+
+// See GobEncode
+func (r *RawConfig) GobDecode(b []byte) error {
+ var data gobRawConfig
+ err := gob.NewDecoder(bytes.NewReader(b)).Decode(&data)
+ if err != nil {
+ return err
+ }
+
+ r.Key = data.Key
+ r.Raw = data.Raw
+
+ return r.init()
+}
+
+// GobEncode is a custom Gob encoder to use so that we only include the
+// raw configuration. Interpolated variables and such are lost and the
+// tree of interpolated variables is recomputed on decode, since it is
+// referentially transparent.
+func (r *RawConfig) GobEncode() ([]byte, error) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ data := gobRawConfig{
+ Key: r.Key,
+ Raw: r.Raw,
+ }
+
+ var buf bytes.Buffer
+ if err := gob.NewEncoder(&buf).Encode(data); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+type gobRawConfig struct {
+ Key string
+ Raw map[string]interface{}
+}
+
+// langEvalConfig returns the evaluation configuration we use to execute.
+func langEvalConfig(vs map[string]ast.Variable) *hil.EvalConfig {
+ funcMap := make(map[string]ast.Function)
+ for k, v := range Funcs() {
+ funcMap[k] = v
+ }
+ funcMap["lookup"] = interpolationFuncLookup(vs)
+ funcMap["keys"] = interpolationFuncKeys(vs)
+ funcMap["values"] = interpolationFuncValues(vs)
+
+ return &hil.EvalConfig{
+ GlobalScope: &ast.BasicScope{
+ VarMap: vs,
+ FuncMap: funcMap,
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode.go b/vendor/github.com/hashicorp/terraform/config/resource_mode.go
new file mode 100644
index 00000000..877c6e84
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/resource_mode.go
@@ -0,0 +1,9 @@
+package config
+
+//go:generate stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go
+type ResourceMode int
+
+const (
+ ManagedResourceMode ResourceMode = iota
+ DataResourceMode
+)
diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
new file mode 100644
index 00000000..ea68b4fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT.
+
+package config
+
+import "fmt"
+
+const _ResourceMode_name = "ManagedResourceModeDataResourceMode"
+
+var _ResourceMode_index = [...]uint8{0, 19, 35}
+
+func (i ResourceMode) String() string {
+ if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) {
+ return fmt.Sprintf("ResourceMode(%d)", i)
+ }
+ return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]]
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/testing.go b/vendor/github.com/hashicorp/terraform/config/testing.go
new file mode 100644
index 00000000..f7bfadd9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/testing.go
@@ -0,0 +1,15 @@
+package config
+
+import (
+ "testing"
+)
+
+// TestRawConfig is used to create a RawConfig for testing.
+func TestRawConfig(t *testing.T, c map[string]interface{}) *RawConfig {
+ cfg, err := NewRawConfig(c)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ return cfg
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go
new file mode 100644
index 00000000..f8776bc5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/dag.go
@@ -0,0 +1,286 @@
+package dag
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/hashicorp/go-multierror"
+)
+
+// AcyclicGraph is a specialization of Graph that cannot have cycles. With
+// this property, we get the property of sane graph traversal.
+type AcyclicGraph struct {
+ Graph
+}
+
+// WalkFunc is the callback used for walking the graph.
+type WalkFunc func(Vertex) error
+
+// DepthWalkFunc is a walk function that also receives the current depth of the
+// walk as an argument
+type DepthWalkFunc func(Vertex, int) error
+
+func (g *AcyclicGraph) DirectedGraph() Grapher {
+ return g
+}
+
+// Ancestors returns a Set that includes every Vertex yielded by walking
+// down from the provided starting Vertex v.
+func (g *AcyclicGraph) Ancestors(v Vertex) (*Set, error) {
+ s := new(Set)
+ start := AsVertexList(g.DownEdges(v))
+ memoFunc := func(v Vertex, d int) error {
+ s.Add(v)
+ return nil
+ }
+
+ if err := g.DepthFirstWalk(start, memoFunc); err != nil {
+ return nil, err
+ }
+
+ return s, nil
+}
+
+// Descendents returns a Set that includes every Vertex yielded by walking
+// up from the provided starting Vertex v.
+func (g *AcyclicGraph) Descendents(v Vertex) (*Set, error) {
+ s := new(Set)
+ start := AsVertexList(g.UpEdges(v))
+ memoFunc := func(v Vertex, d int) error {
+ s.Add(v)
+ return nil
+ }
+
+ if err := g.ReverseDepthFirstWalk(start, memoFunc); err != nil {
+ return nil, err
+ }
+
+ return s, nil
+}
+
+// Root returns the root of the DAG, or an error.
+//
+// Complexity: O(V)
+func (g *AcyclicGraph) Root() (Vertex, error) {
+ roots := make([]Vertex, 0, 1)
+ for _, v := range g.Vertices() {
+ if g.UpEdges(v).Len() == 0 {
+ roots = append(roots, v)
+ }
+ }
+
+ if len(roots) > 1 {
+ // TODO(mitchellh): make this error message a lot better
+ return nil, fmt.Errorf("multiple roots: %#v", roots)
+ }
+
+ if len(roots) == 0 {
+ return nil, fmt.Errorf("no roots found")
+ }
+
+ return roots[0], nil
+}
+
+// TransitiveReduction performs the transitive reduction of graph g in place.
+// The transitive reduction of a graph is a graph with as few edges as
+// possible with the same reachability as the original graph. This means
+// that if there are three nodes A => B => C, and A connects to both
+// B and C, and B connects to C, then the transitive reduction is the
+// same graph with only a single edge between A and B, and a single edge
+// between B and C.
+//
+// The graph must be valid for this operation to behave properly. If
+// Validate() returns an error, the behavior is undefined and the results
+// will likely be unexpected.
+//
+// Complexity: O(V(V+E)), or asymptotically O(VE)
+func (g *AcyclicGraph) TransitiveReduction() {
+ // For each vertex u in graph g, do a DFS starting from each vertex
+ // v such that the edge (u,v) exists (v is a direct descendant of u).
+ //
+ // For each v-prime reachable from v, remove the edge (u, v-prime).
+ defer g.debug.BeginOperation("TransitiveReduction", "").End("")
+
+ for _, u := range g.Vertices() {
+ uTargets := g.DownEdges(u)
+ vs := AsVertexList(g.DownEdges(u))
+
+ g.DepthFirstWalk(vs, func(v Vertex, d int) error {
+ shared := uTargets.Intersection(g.DownEdges(v))
+ for _, vPrime := range AsVertexList(shared) {
+ g.RemoveEdge(BasicEdge(u, vPrime))
+ }
+
+ return nil
+ })
+ }
+}
+
+// Validate validates the DAG. A DAG is valid if it has a single root
+// with no cycles.
+func (g *AcyclicGraph) Validate() error {
+ if _, err := g.Root(); err != nil {
+ return err
+ }
+
+ // Look for cycles of more than 1 component
+ var err error
+ cycles := g.Cycles()
+ if len(cycles) > 0 {
+ for _, cycle := range cycles {
+ cycleStr := make([]string, len(cycle))
+ for j, vertex := range cycle {
+ cycleStr[j] = VertexName(vertex)
+ }
+
+ err = multierror.Append(err, fmt.Errorf(
+ "Cycle: %s", strings.Join(cycleStr, ", ")))
+ }
+ }
+
+ // Look for cycles to self
+ for _, e := range g.Edges() {
+ if e.Source() == e.Target() {
+ err = multierror.Append(err, fmt.Errorf(
+ "Self reference: %s", VertexName(e.Source())))
+ }
+ }
+
+ return err
+}
+
+// Cycles returns the cycles found within the graph, where each cycle is
+// a strongly connected component containing more than one vertex.
+func (g *AcyclicGraph) Cycles() [][]Vertex {
+ var cycles [][]Vertex
+ for _, cycle := range StronglyConnected(&g.Graph) {
+ if len(cycle) > 1 {
+ cycles = append(cycles, cycle)
+ }
+ }
+ return cycles
+}
+
+// Walk walks the graph, calling your callback as each node is visited.
+// This will walk nodes in parallel if it can. Because the walk is done
+// in parallel, the error returned will be a multierror.
+func (g *AcyclicGraph) Walk(cb WalkFunc) error {
+ defer g.debug.BeginOperation(typeWalk, "").End("")
+
+ w := &Walker{Callback: cb, Reverse: true}
+ w.Update(g)
+ return w.Wait()
+}
+
+// AsVertexList is a simple convenience helper for converting a dag.Set to
+// a []Vertex.
+func AsVertexList(s *Set) []Vertex {
+ rawList := s.List()
+ vertexList := make([]Vertex, len(rawList))
+ for i, raw := range rawList {
+ vertexList[i] = raw.(Vertex)
+ }
+ return vertexList
+}
+
+type vertexAtDepth struct {
+ Vertex Vertex
+ Depth int
+}
+
+// DepthFirstWalk does a depth-first walk of the graph starting from
+// the vertices in start.
+func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
+ defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("")
+
+ seen := make(map[Vertex]struct{})
+ frontier := make([]*vertexAtDepth, len(start))
+ for i, v := range start {
+ frontier[i] = &vertexAtDepth{
+ Vertex: v,
+ Depth: 0,
+ }
+ }
+ for len(frontier) > 0 {
+ // Pop the current vertex
+ n := len(frontier)
+ current := frontier[n-1]
+ frontier = frontier[:n-1]
+
+ // Check if we've seen this already and return...
+ if _, ok := seen[current.Vertex]; ok {
+ continue
+ }
+ seen[current.Vertex] = struct{}{}
+
+ // Visit the current node
+ if err := f(current.Vertex, current.Depth); err != nil {
+ return err
+ }
+
+ // Visit targets of this in a consistent order.
+ targets := AsVertexList(g.DownEdges(current.Vertex))
+ sort.Sort(byVertexName(targets))
+ for _, t := range targets {
+ frontier = append(frontier, &vertexAtDepth{
+ Vertex: t,
+ Depth: current.Depth + 1,
+ })
+ }
+ }
+
+ return nil
+}
+
+// ReverseDepthFirstWalk does a depth-first walk _up_ the graph starting from
+// the vertices in start.
+func (g *AcyclicGraph) ReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
+ defer g.debug.BeginOperation(typeReverseDepthFirstWalk, "").End("")
+
+ seen := make(map[Vertex]struct{})
+ frontier := make([]*vertexAtDepth, len(start))
+ for i, v := range start {
+ frontier[i] = &vertexAtDepth{
+ Vertex: v,
+ Depth: 0,
+ }
+ }
+ for len(frontier) > 0 {
+ // Pop the current vertex
+ n := len(frontier)
+ current := frontier[n-1]
+ frontier = frontier[:n-1]
+
+ // Check if we've seen this already and return...
+ if _, ok := seen[current.Vertex]; ok {
+ continue
+ }
+ seen[current.Vertex] = struct{}{}
+
+ // Add next set of targets in a consistent order.
+ targets := AsVertexList(g.UpEdges(current.Vertex))
+ sort.Sort(byVertexName(targets))
+ for _, t := range targets {
+ frontier = append(frontier, &vertexAtDepth{
+ Vertex: t,
+ Depth: current.Depth + 1,
+ })
+ }
+
+ // Visit the current node
+ if err := f(current.Vertex, current.Depth); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// byVertexName implements sort.Interface so a list of Vertices can be sorted
+// consistently by their VertexName
+type byVertexName []Vertex
+
+func (b byVertexName) Len() int { return len(b) }
+func (b byVertexName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byVertexName) Less(i, j int) bool {
+ return VertexName(b[i]) < VertexName(b[j])
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/dot.go b/vendor/github.com/hashicorp/terraform/dag/dot.go
new file mode 100644
index 00000000..7e6d2af3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/dot.go
@@ -0,0 +1,282 @@
+package dag
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// DotOpts are the options for generating a dot formatted Graph.
+type DotOpts struct {
+ // Allows some nodes to decide to only show themselves when the user has
+ // requested the "verbose" graph.
+ Verbose bool
+
+ // Highlight Cycles
+ DrawCycles bool
+
+ // How many levels to expand modules as we draw
+ MaxDepth int
+
+ // use this to keep the cluster_ naming convention from the previous dot writer
+ cluster bool
+}
+
+// GraphNodeDotter can be implemented by a node to cause it to be included
+// in the dot graph. The Dot method will be called which is expected to
+// return a representation of this node.
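+//
+// A hypothetical implementation sketch (myNode is illustrative):
+//
+//   func (n *myNode) DotNode(title string, _ *DotOpts) *DotNode {
+//       return &DotNode{Name: title, Attrs: map[string]string{"shape": "box"}}
+//   }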
+type GraphNodeDotter interface {
+ // Dot is called to return the dot formatting for the node.
+ // The first parameter is the title of the node.
+ // The second parameter includes user-specified options that affect the dot
+ // graph. See GraphDotOpts below for details.
+ DotNode(string, *DotOpts) *DotNode
+}
+
+// DotNode provides a structure for Vertices to return in order to specify their
+// dot format.
+type DotNode struct {
+ Name string
+ Attrs map[string]string
+}
+
+// Dot returns the DOT representation of this Graph.
+func (g *marshalGraph) Dot(opts *DotOpts) []byte {
+ if opts == nil {
+ opts = &DotOpts{
+ DrawCycles: true,
+ MaxDepth: -1,
+ Verbose: true,
+ }
+ }
+
+ var w indentWriter
+ w.WriteString("digraph {\n")
+ w.Indent()
+
+ // some dot defaults
+ w.WriteString(`compound = "true"` + "\n")
+ w.WriteString(`newrank = "true"` + "\n")
+
+ // the top level graph is written as the first subgraph
+ w.WriteString(`subgraph "root" {` + "\n")
+ g.writeBody(opts, &w)
+
+ // cluster isn't really used other than for naming purposes in some graphs
+ opts.cluster = opts.MaxDepth != 0
+ maxDepth := opts.MaxDepth
+ if maxDepth == 0 {
+ maxDepth = -1
+ }
+
+ for _, s := range g.Subgraphs {
+ g.writeSubgraph(s, opts, maxDepth, &w)
+ }
+
+ w.Unindent()
+ w.WriteString("}\n")
+ return w.Bytes()
+}
+
+func (v *marshalVertex) dot(g *marshalGraph, opts *DotOpts) []byte {
+ var buf bytes.Buffer
+ graphName := g.Name
+ if graphName == "" {
+ graphName = "root"
+ }
+
+ name := v.Name
+ attrs := v.Attrs
+ if v.graphNodeDotter != nil {
+ node := v.graphNodeDotter.DotNode(name, opts)
+ if node == nil {
+ return []byte{}
+ }
+
+ newAttrs := make(map[string]string)
+ for k, v := range attrs {
+ newAttrs[k] = v
+ }
+ for k, v := range node.Attrs {
+ newAttrs[k] = v
+ }
+
+ name = node.Name
+ attrs = newAttrs
+ }
+
+ buf.WriteString(fmt.Sprintf(`"[%s] %s"`, graphName, name))
+ writeAttrs(&buf, attrs)
+ buf.WriteByte('\n')
+
+ return buf.Bytes()
+}
+
+func (e *marshalEdge) dot(g *marshalGraph) string {
+ var buf bytes.Buffer
+ graphName := g.Name
+ if graphName == "" {
+ graphName = "root"
+ }
+
+ sourceName := g.vertexByID(e.Source).Name
+ targetName := g.vertexByID(e.Target).Name
+ s := fmt.Sprintf(`"[%s] %s" -> "[%s] %s"`, graphName, sourceName, graphName, targetName)
+ buf.WriteString(s)
+ writeAttrs(&buf, e.Attrs)
+
+ return buf.String()
+}
+
+func cycleDot(e *marshalEdge, g *marshalGraph) string {
+ return e.dot(g) + ` [color = "red", penwidth = "2.0"]`
+}
+
+// writeSubgraph writes the subgraph body. This is recursive, and the depth
+// argument is used to limit the depth of recursion.
+func (g *marshalGraph) writeSubgraph(sg *marshalGraph, opts *DotOpts, depth int, w *indentWriter) {
+ if depth == 0 {
+ return
+ }
+ depth--
+
+ name := sg.Name
+ if opts.cluster {
+ // we prefix with cluster_ to match the old dot output
+ name = "cluster_" + name
+ sg.Attrs["label"] = sg.Name
+ }
+ w.WriteString(fmt.Sprintf("subgraph %q {\n", name))
+ sg.writeBody(opts, w)
+
+ for _, sg := range sg.Subgraphs {
+ g.writeSubgraph(sg, opts, depth, w)
+ }
+}
+
+func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) {
+ w.Indent()
+
+ for _, as := range attrStrings(g.Attrs) {
+ w.WriteString(as + "\n")
+ }
+
+ // list of Vertices that aren't to be included in the dot output
+ skip := map[string]bool{}
+
+ for _, v := range g.Vertices {
+ if v.graphNodeDotter == nil {
+ skip[v.ID] = true
+ continue
+ }
+
+ w.Write(v.dot(g, opts))
+ }
+
+ var dotEdges []string
+
+ if opts.DrawCycles {
+ for _, c := range g.Cycles {
+ if len(c) < 2 {
+ continue
+ }
+
+ for i, j := 0, 1; i < len(c); i, j = i+1, j+1 {
+ if j >= len(c) {
+ j = 0
+ }
+ src := c[i]
+ tgt := c[j]
+
+ if skip[src.ID] || skip[tgt.ID] {
+ continue
+ }
+
+ e := &marshalEdge{
+ Name: fmt.Sprintf("%s|%s", src.Name, tgt.Name),
+ Source: src.ID,
+ Target: tgt.ID,
+ Attrs: make(map[string]string),
+ }
+
+ dotEdges = append(dotEdges, cycleDot(e, g))
+ src = tgt
+ }
+ }
+ }
+
+ for _, e := range g.Edges {
+ dotEdges = append(dotEdges, e.dot(g))
+ }
+
+ // sort these again to match the old output
+ sort.Strings(dotEdges)
+
+ for _, e := range dotEdges {
+ w.WriteString(e + "\n")
+ }
+
+ w.Unindent()
+ w.WriteString("}\n")
+}
+
+func writeAttrs(buf *bytes.Buffer, attrs map[string]string) {
+ if len(attrs) > 0 {
+ buf.WriteString(" [")
+ buf.WriteString(strings.Join(attrStrings(attrs), ", "))
+ buf.WriteString("]")
+ }
+}
+
+func attrStrings(attrs map[string]string) []string {
+ strings := make([]string, 0, len(attrs))
+ for k, v := range attrs {
+ strings = append(strings, fmt.Sprintf("%s = %q", k, v))
+ }
+ sort.Strings(strings)
+ return strings
+}
+
+// indentWriter provides a bytes.Buffer-like structure that indents when
+// starting a new line.
+type indentWriter struct {
+ bytes.Buffer
+ level int
+}
+
+func (w *indentWriter) indent() {
+ newline := []byte("\n")
+ if !bytes.HasSuffix(w.Bytes(), newline) {
+ return
+ }
+ for i := 0; i < w.level; i++ {
+ w.Buffer.WriteString("\t")
+ }
+}
+
+// Indent increases indentation by 1
+func (w *indentWriter) Indent() { w.level++ }
+
+// Unindent decreases indentation by 1
+func (w *indentWriter) Unindent() { w.level-- }
+
+// The following methods intercept the bytes.Buffer writes and insert the
+// indentation when starting a new line.
+func (w *indentWriter) Write(b []byte) (int, error) {
+ w.indent()
+ return w.Buffer.Write(b)
+}
+
+func (w *indentWriter) WriteString(s string) (int, error) {
+ w.indent()
+ return w.Buffer.WriteString(s)
+}
+func (w *indentWriter) WriteByte(b byte) error {
+ w.indent()
+ return w.Buffer.WriteByte(b)
+}
+func (w *indentWriter) WriteRune(r rune) (int, error) {
+ w.indent()
+ return w.Buffer.WriteRune(r)
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/edge.go b/vendor/github.com/hashicorp/terraform/dag/edge.go
new file mode 100644
index 00000000..f0d99ee3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/edge.go
@@ -0,0 +1,37 @@
+package dag
+
+import (
+ "fmt"
+)
+
+// Edge represents an edge in the graph, with a source and target vertex.
+type Edge interface {
+ Source() Vertex
+ Target() Vertex
+
+ Hashable
+}
+
+// BasicEdge returns an Edge implementation that simply tracks the source
+// and target given as-is.
+func BasicEdge(source, target Vertex) Edge {
+ return &basicEdge{S: source, T: target}
+}
+
+// basicEdge is a basic implementation of Edge that has the source and
+// target vertex.
+type basicEdge struct {
+ S, T Vertex
+}
+
+func (e *basicEdge) Hashcode() interface{} {
+ return fmt.Sprintf("%p-%p", e.S, e.T)
+}
+
+func (e *basicEdge) Source() Vertex {
+ return e.S
+}
+
+func (e *basicEdge) Target() Vertex {
+ return e.T
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/graph.go b/vendor/github.com/hashicorp/terraform/dag/graph.go
new file mode 100644
index 00000000..e7517a20
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/graph.go
@@ -0,0 +1,391 @@
+package dag
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "sort"
+)
+
+// Graph is used to represent a dependency graph.
+type Graph struct {
+ vertices *Set
+ edges *Set
+ downEdges map[interface{}]*Set
+ upEdges map[interface{}]*Set
+
+ // JSON encoder for recording debug information
+ debug *encoder
+}
+
+// Subgrapher allows a Vertex to be a Graph itself, by returning a Grapher.
+type Subgrapher interface {
+ Subgraph() Grapher
+}
+
+// A Grapher is any type that returns a Grapher, mainly used to identify
+// dag.Graph and dag.AcyclicGraph. In the case of Graph and AcyclicGraph, they
+// return themselves.
+type Grapher interface {
+ DirectedGraph() Grapher
+}
+
+// Vertex of the graph.
+type Vertex interface{}
+
+// NamedVertex is an optional interface that can be implemented by Vertex
+// to give it a human-friendly name that is used for outputting the graph.
+type NamedVertex interface {
+ Vertex
+ Name() string
+}
+
+func (g *Graph) DirectedGraph() Grapher {
+ return g
+}
+
+// Vertices returns the list of all the vertices in the graph.
+func (g *Graph) Vertices() []Vertex {
+ list := g.vertices.List()
+ result := make([]Vertex, len(list))
+ for i, v := range list {
+ result[i] = v.(Vertex)
+ }
+
+ return result
+}
+
+// Edges returns the list of all the edges in the graph.
+func (g *Graph) Edges() []Edge {
+ list := g.edges.List()
+ result := make([]Edge, len(list))
+ for i, v := range list {
+ result[i] = v.(Edge)
+ }
+
+ return result
+}
+
+// EdgesFrom returns the list of edges from the given source.
+func (g *Graph) EdgesFrom(v Vertex) []Edge {
+ var result []Edge
+ from := hashcode(v)
+ for _, e := range g.Edges() {
+ if hashcode(e.Source()) == from {
+ result = append(result, e)
+ }
+ }
+
+ return result
+}
+
+// EdgesTo returns the list of edges to the given target.
+func (g *Graph) EdgesTo(v Vertex) []Edge {
+ var result []Edge
+ search := hashcode(v)
+ for _, e := range g.Edges() {
+ if hashcode(e.Target()) == search {
+ result = append(result, e)
+ }
+ }
+
+ return result
+}
+
+// HasVertex checks if the given Vertex is present in the graph.
+func (g *Graph) HasVertex(v Vertex) bool {
+ return g.vertices.Include(v)
+}
+
+// HasEdge checks if the given Edge is present in the graph.
+func (g *Graph) HasEdge(e Edge) bool {
+ return g.edges.Include(e)
+}
+
+// Add adds a vertex to the graph. This is safe to call multiple times with
+// the same Vertex.
+func (g *Graph) Add(v Vertex) Vertex {
+ g.init()
+ g.vertices.Add(v)
+ g.debug.Add(v)
+ return v
+}
+
+// Remove removes a vertex from the graph. This will also remove any
+// edges with this vertex as a source or target.
+func (g *Graph) Remove(v Vertex) Vertex {
+ // Delete the vertex itself
+ g.vertices.Delete(v)
+ g.debug.Remove(v)
+
+ // Delete the edges to non-existent things
+ for _, target := range g.DownEdges(v).List() {
+ g.RemoveEdge(BasicEdge(v, target))
+ }
+ for _, source := range g.UpEdges(v).List() {
+ g.RemoveEdge(BasicEdge(source, v))
+ }
+
+ return nil
+}
+
+// Replace replaces the original Vertex with replacement. If the original
+// does not exist within the graph, then false is returned. Otherwise, true
+// is returned.
+func (g *Graph) Replace(original, replacement Vertex) bool {
+ // If we don't have the original, we can't do anything
+ if !g.vertices.Include(original) {
+ return false
+ }
+
+ defer g.debug.BeginOperation("Replace", "").End("")
+
+ // If they're the same, then don't do anything
+ if original == replacement {
+ return true
+ }
+
+ // Add our new vertex, then copy all the edges
+ g.Add(replacement)
+ for _, target := range g.DownEdges(original).List() {
+ g.Connect(BasicEdge(replacement, target))
+ }
+ for _, source := range g.UpEdges(original).List() {
+ g.Connect(BasicEdge(source, replacement))
+ }
+
+ // Remove our old vertex, which will also remove all the edges
+ g.Remove(original)
+
+ return true
+}
+
+// RemoveEdge removes an edge from the graph.
+func (g *Graph) RemoveEdge(edge Edge) {
+ g.init()
+ g.debug.RemoveEdge(edge)
+
+ // Delete the edge from the set
+ g.edges.Delete(edge)
+
+ // Delete the up/down edges
+ if s, ok := g.downEdges[hashcode(edge.Source())]; ok {
+ s.Delete(edge.Target())
+ }
+ if s, ok := g.upEdges[hashcode(edge.Target())]; ok {
+ s.Delete(edge.Source())
+ }
+}
+
+// DownEdges returns the outward edges from the source Vertex v.
+func (g *Graph) DownEdges(v Vertex) *Set {
+ g.init()
+ return g.downEdges[hashcode(v)]
+}
+
+// UpEdges returns the inward edges to the destination Vertex v.
+func (g *Graph) UpEdges(v Vertex) *Set {
+ g.init()
+ return g.upEdges[hashcode(v)]
+}
+
+// Connect adds an edge with the given source and target. This is safe to
+// call multiple times with the same value. Note that the same value is
+// verified through pointer equality of the vertices, not through the
+// value of the edge itself.
+func (g *Graph) Connect(edge Edge) {
+ g.init()
+ g.debug.Connect(edge)
+
+ source := edge.Source()
+ target := edge.Target()
+ sourceCode := hashcode(source)
+ targetCode := hashcode(target)
+
+ // Do we have this already? If so, don't add it again.
+ if s, ok := g.downEdges[sourceCode]; ok && s.Include(target) {
+ return
+ }
+
+ // Add the edge to the set
+ g.edges.Add(edge)
+
+ // Add the down edge
+ s, ok := g.downEdges[sourceCode]
+ if !ok {
+ s = new(Set)
+ g.downEdges[sourceCode] = s
+ }
+ s.Add(target)
+
+ // Add the up edge
+ s, ok = g.upEdges[targetCode]
+ if !ok {
+ s = new(Set)
+ g.upEdges[targetCode] = s
+ }
+ s.Add(source)
+}
+
+// StringWithNodeTypes outputs a human-friendly representation of the graph
+// structure, including the Go type of each vertex.
+func (g *Graph) StringWithNodeTypes() string {
+ var buf bytes.Buffer
+
+ // Build the list of node names and a mapping so that we can more
+ // easily alphabetize the output to remain deterministic.
+ vertices := g.Vertices()
+ names := make([]string, 0, len(vertices))
+ mapping := make(map[string]Vertex, len(vertices))
+ for _, v := range vertices {
+ name := VertexName(v)
+ names = append(names, name)
+ mapping[name] = v
+ }
+ sort.Strings(names)
+
+ // Write each node in order...
+ for _, name := range names {
+ v := mapping[name]
+ targets := g.downEdges[hashcode(v)]
+
+ buf.WriteString(fmt.Sprintf("%s - %T\n", name, v))
+
+ // Alphabetize dependencies
+ deps := make([]string, 0, targets.Len())
+ targetNodes := make(map[string]Vertex)
+ for _, target := range targets.List() {
+ dep := VertexName(target)
+ deps = append(deps, dep)
+ targetNodes[dep] = target
+ }
+ sort.Strings(deps)
+
+ // Write dependencies
+ for _, d := range deps {
+ buf.WriteString(fmt.Sprintf(" %s - %T\n", d, targetNodes[d]))
+ }
+ }
+
+ return buf.String()
+}
+
+// String outputs a human-friendly representation of the graph structure.
+func (g *Graph) String() string {
+ var buf bytes.Buffer
+
+ // Build the list of node names and a mapping so that we can more
+ // easily alphabetize the output to remain deterministic.
+ vertices := g.Vertices()
+ names := make([]string, 0, len(vertices))
+ mapping := make(map[string]Vertex, len(vertices))
+ for _, v := range vertices {
+ name := VertexName(v)
+ names = append(names, name)
+ mapping[name] = v
+ }
+ sort.Strings(names)
+
+ // Write each node in order...
+ for _, name := range names {
+ v := mapping[name]
+ targets := g.downEdges[hashcode(v)]
+
+ buf.WriteString(fmt.Sprintf("%s\n", name))
+
+ // Alphabetize dependencies
+ deps := make([]string, 0, targets.Len())
+ for _, target := range targets.List() {
+ deps = append(deps, VertexName(target))
+ }
+ sort.Strings(deps)
+
+ // Write dependencies
+ for _, d := range deps {
+ buf.WriteString(fmt.Sprintf(" %s\n", d))
+ }
+ }
+
+ return buf.String()
+}
+
+func (g *Graph) init() {
+ if g.vertices == nil {
+ g.vertices = new(Set)
+ }
+ if g.edges == nil {
+ g.edges = new(Set)
+ }
+ if g.downEdges == nil {
+ g.downEdges = make(map[interface{}]*Set)
+ }
+ if g.upEdges == nil {
+ g.upEdges = make(map[interface{}]*Set)
+ }
+}
+
+// Dot returns a dot-formatted representation of the Graph.
+func (g *Graph) Dot(opts *DotOpts) []byte {
+ return newMarshalGraph("", g).Dot(opts)
+}
+
+// MarshalJSON returns a JSON representation of the entire Graph.
+func (g *Graph) MarshalJSON() ([]byte, error) {
+ dg := newMarshalGraph("root", g)
+ return json.MarshalIndent(dg, "", " ")
+}
+
+// SetDebugWriter sets the io.Writer where the Graph will record debug
+// information. After this is set, the graph will immediately encode itself to
+// the stream, and continue to record all subsequent operations.
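+//
+// An illustrative sketch:
+//
+//   var buf bytes.Buffer
+//   g.SetDebugWriter(&buf)
+//   // subsequent Add/Remove/Connect calls are now recorded to buf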
+func (g *Graph) SetDebugWriter(w io.Writer) {
+ g.debug = &encoder{w: w}
+ g.debug.Encode(newMarshalGraph("root", g))
+}
+
+// DebugVertexInfo encodes arbitrary information about a vertex in the graph
+// debug logs.
+func (g *Graph) DebugVertexInfo(v Vertex, info string) {
+ va := newVertexInfo(typeVertexInfo, v, info)
+ g.debug.Encode(va)
+}
+
+// DebugEdgeInfo encodes arbitrary information about an edge in the graph debug
+// logs.
+func (g *Graph) DebugEdgeInfo(e Edge, info string) {
+ ea := newEdgeInfo(typeEdgeInfo, e, info)
+ g.debug.Encode(ea)
+}
+
+// DebugVisitInfo records a visit to a Vertex during a walk operation.
+func (g *Graph) DebugVisitInfo(v Vertex, info string) {
+ vi := newVertexInfo(typeVisitInfo, v, info)
+ g.debug.Encode(vi)
+}
+
+// DebugOperation marks the start of a set of graph transformations in
+// the debug log, and returns a DebugOperationEnd func, which marks the end of
+// the operation in the log. Additional information can be added to the log via
+// the info parameter.
+//
+// The returned func's End method allows this method to be called from a single
+// defer statement:
+// defer g.DebugOperationBegin("OpName", "operating").End("")
+//
+// The returned function must be called to properly close the logical operation
+// in the logs.
+func (g *Graph) DebugOperation(operation string, info string) DebugOperationEnd {
+ return g.debug.BeginOperation(operation, info)
+}
+
+// VertexName returns the name of a vertex.
+func VertexName(raw Vertex) string {
+ switch v := raw.(type) {
+ case NamedVertex:
+ return v.Name()
+ case fmt.Stringer:
+ return fmt.Sprintf("%s", v)
+ default:
+ return fmt.Sprintf("%v", v)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/marshal.go b/vendor/github.com/hashicorp/terraform/dag/marshal.go
new file mode 100644
index 00000000..16d5dd6d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/marshal.go
@@ -0,0 +1,462 @@
+package dag
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+const (
+ typeOperation = "Operation"
+ typeTransform = "Transform"
+ typeWalk = "Walk"
+ typeDepthFirstWalk = "DepthFirstWalk"
+ typeReverseDepthFirstWalk = "ReverseDepthFirstWalk"
+ typeTransitiveReduction = "TransitiveReduction"
+ typeEdgeInfo = "EdgeInfo"
+ typeVertexInfo = "VertexInfo"
+ typeVisitInfo = "VisitInfo"
+)
+
+// the marshal* structs are for serialization of the graph data.
+type marshalGraph struct {
+ // Type is always "Graph", for identification as a top level object in the
+ // JSON stream.
+ Type string
+
+ // Each marshal structure requires a unique ID so that it can be referenced
+ // by other structures.
+ ID string `json:",omitempty"`
+
+ // Human readable name for this graph.
+ Name string `json:",omitempty"`
+
+ // Arbitrary attributes that can be added to the output.
+ Attrs map[string]string `json:",omitempty"`
+
+ // List of graph vertices, sorted by Name.
+ Vertices []*marshalVertex `json:",omitempty"`
+
+ // List of edges, sorted by Name.
+ Edges []*marshalEdge `json:",omitempty"`
+
+ // Any number of subgraphs. A subgraph itself is considered a vertex, and
+ // may be referenced by either end of an edge.
+ Subgraphs []*marshalGraph `json:",omitempty"`
+
+ // Any lists of vertices that are included in cycles.
+ Cycles [][]*marshalVertex `json:",omitempty"`
+}
+
+// The add, remove, connect, removeEdge methods mirror the basic Graph
+// manipulations to reconstruct a marshalGraph from a debug log.
+func (g *marshalGraph) add(v *marshalVertex) {
+ g.Vertices = append(g.Vertices, v)
+ sort.Sort(vertices(g.Vertices))
+}
+
+func (g *marshalGraph) remove(v *marshalVertex) {
+ for i, existing := range g.Vertices {
+ if v.ID == existing.ID {
+ g.Vertices = append(g.Vertices[:i], g.Vertices[i+1:]...)
+ return
+ }
+ }
+}
+
+func (g *marshalGraph) connect(e *marshalEdge) {
+ g.Edges = append(g.Edges, e)
+ sort.Sort(edges(g.Edges))
+}
+
+func (g *marshalGraph) removeEdge(e *marshalEdge) {
+ for i, existing := range g.Edges {
+ if e.Source == existing.Source && e.Target == existing.Target {
+ g.Edges = append(g.Edges[:i], g.Edges[i+1:]...)
+ return
+ }
+ }
+}
+
+func (g *marshalGraph) vertexByID(id string) *marshalVertex {
+ for _, v := range g.Vertices {
+ if id == v.ID {
+ return v
+ }
+ }
+ return nil
+}
+
+type marshalVertex struct {
+ // Unique ID, used to reference this vertex from other structures.
+ ID string
+
+ // Human readable name
+ Name string `json:",omitempty"`
+
+ Attrs map[string]string `json:",omitempty"`
+
+ // This is to help transition from the old Dot interfaces. We record if the
+ // node was a GraphNodeDotter here, so we can call it to get attributes.
+ graphNodeDotter GraphNodeDotter
+}
+
+func newMarshalVertex(v Vertex) *marshalVertex {
+ dn, ok := v.(GraphNodeDotter)
+ if !ok {
+ dn = nil
+ }
+
+ return &marshalVertex{
+ ID: marshalVertexID(v),
+ Name: VertexName(v),
+ Attrs: make(map[string]string),
+ graphNodeDotter: dn,
+ }
+}
+
+// vertices is a sort.Interface implementation for sorting vertices by Name
+type vertices []*marshalVertex
+
+func (v vertices) Less(i, j int) bool { return v[i].Name < v[j].Name }
+func (v vertices) Len() int { return len(v) }
+func (v vertices) Swap(i, j int) { v[i], v[j] = v[j], v[i] }
+
+type marshalEdge struct {
+ // Human readable name
+ Name string
+
+ // Source and Target Vertices by ID
+ Source string
+ Target string
+
+ Attrs map[string]string `json:",omitempty"`
+}
+
+func newMarshalEdge(e Edge) *marshalEdge {
+ return &marshalEdge{
+ Name: fmt.Sprintf("%s|%s", VertexName(e.Source()), VertexName(e.Target())),
+ Source: marshalVertexID(e.Source()),
+ Target: marshalVertexID(e.Target()),
+ Attrs: make(map[string]string),
+ }
+}
+
+// edges is a sort.Interface implementation for sorting edges by Name
+type edges []*marshalEdge
+
+func (e edges) Less(i, j int) bool { return e[i].Name < e[j].Name }
+func (e edges) Len() int { return len(e) }
+func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
+
+// build a marshalGraph structure from a *Graph
+func newMarshalGraph(name string, g *Graph) *marshalGraph {
+ mg := &marshalGraph{
+ Type: "Graph",
+ Name: name,
+ Attrs: make(map[string]string),
+ }
+
+ for _, v := range g.Vertices() {
+ id := marshalVertexID(v)
+ if sg, ok := marshalSubgrapher(v); ok {
+ smg := newMarshalGraph(VertexName(v), sg)
+ smg.ID = id
+ mg.Subgraphs = append(mg.Subgraphs, smg)
+ }
+
+ mv := newMarshalVertex(v)
+ mg.Vertices = append(mg.Vertices, mv)
+ }
+
+ sort.Sort(vertices(mg.Vertices))
+
+ for _, e := range g.Edges() {
+ mg.Edges = append(mg.Edges, newMarshalEdge(e))
+ }
+
+ sort.Sort(edges(mg.Edges))
+
+ for _, c := range (&AcyclicGraph{*g}).Cycles() {
+ var cycle []*marshalVertex
+ for _, v := range c {
+ mv := newMarshalVertex(v)
+ cycle = append(cycle, mv)
+ }
+ mg.Cycles = append(mg.Cycles, cycle)
+ }
+
+ return mg
+}
+
+// marshalVertexID attempts to return a unique ID for any vertex.
+func marshalVertexID(v Vertex) string {
+ val := reflect.ValueOf(v)
+ switch val.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+ return strconv.Itoa(int(val.Pointer()))
+ case reflect.Interface:
+ return strconv.Itoa(int(val.InterfaceData()[1]))
+ }
+
+ if v, ok := v.(Hashable); ok {
+ h := v.Hashcode()
+ if h, ok := h.(string); ok {
+ return h
+ }
+ }
+
+ // fallback to a name, which we hope is unique.
+ return VertexName(v)
+
+ // we could try harder by attempting to read the arbitrary value from the
+ // interface, but we shouldn't get here from terraform right now.
+}
+
+// check for a Subgrapher, and return the underlying *Graph.
+func marshalSubgrapher(v Vertex) (*Graph, bool) {
+ sg, ok := v.(Subgrapher)
+ if !ok {
+ return nil, false
+ }
+
+ switch g := sg.Subgraph().DirectedGraph().(type) {
+ case *Graph:
+ return g, true
+ case *AcyclicGraph:
+ return &g.Graph, true
+ }
+
+ return nil, false
+}
+
+// The DebugOperationEnd func type provides a way to call an End function via a
+// method call, allowing for the chaining of methods in a defer statement.
+type DebugOperationEnd func(string)
+
+// End calls function e with the info parameter, marking the end of this
+// operation in the logs.
+func (e DebugOperationEnd) End(info string) { e(info) }
+
+// encoder provides methods to write debug data to an io.Writer, and is a noop
+// when no writer is present
+type encoder struct {
+ sync.Mutex
+ w io.Writer
+}
+
+// Encode is analogous to json.Encoder.Encode
+func (e *encoder) Encode(i interface{}) {
+ if e == nil || e.w == nil {
+ return
+ }
+ e.Lock()
+ defer e.Unlock()
+
+ js, err := json.Marshal(i)
+ if err != nil {
+ log.Println("[ERROR] dag:", err)
+ return
+ }
+ js = append(js, '\n')
+
+ _, err = e.w.Write(js)
+ if err != nil {
+ log.Println("[ERROR] dag:", err)
+ return
+ }
+}
+
+// Add records the addition of Vertex v.
+func (e *encoder) Add(v Vertex) {
+ e.Encode(marshalTransform{
+ Type: typeTransform,
+ AddVertex: newMarshalVertex(v),
+ })
+}
+
+// Remove records the removal of Vertex v.
+func (e *encoder) Remove(v Vertex) {
+ e.Encode(marshalTransform{
+ Type: typeTransform,
+ RemoveVertex: newMarshalVertex(v),
+ })
+}
+
+// Connect records the addition of Edge edge.
+func (e *encoder) Connect(edge Edge) {
+ e.Encode(marshalTransform{
+ Type: typeTransform,
+ AddEdge: newMarshalEdge(edge),
+ })
+}
+
+// RemoveEdge records the removal of Edge edge.
+func (e *encoder) RemoveEdge(edge Edge) {
+ e.Encode(marshalTransform{
+ Type: typeTransform,
+ RemoveEdge: newMarshalEdge(edge),
+ })
+}
+
+// BeginOperation marks the start of a set of graph transformations, and
+// returns a DebugOperationEnd func to be called once the operation is
+// complete.
+func (e *encoder) BeginOperation(op string, info string) DebugOperationEnd {
+ if e == nil {
+ return func(string) {}
+ }
+
+ e.Encode(marshalOperation{
+ Type: typeOperation,
+ Begin: op,
+ Info: info,
+ })
+
+ return func(info string) {
+ e.Encode(marshalOperation{
+ Type: typeOperation,
+ End: op,
+ Info: info,
+ })
+ }
+}
+
+// structure for recording graph transformations
+type marshalTransform struct {
+ // Type: "Transform"
+ Type string
+ AddEdge *marshalEdge `json:",omitempty"`
+ RemoveEdge *marshalEdge `json:",omitempty"`
+ AddVertex *marshalVertex `json:",omitempty"`
+ RemoveVertex *marshalVertex `json:",omitempty"`
+}
+
+func (t marshalTransform) Transform(g *marshalGraph) {
+ switch {
+ case t.AddEdge != nil:
+ g.connect(t.AddEdge)
+ case t.RemoveEdge != nil:
+ g.removeEdge(t.RemoveEdge)
+ case t.AddVertex != nil:
+ g.add(t.AddVertex)
+ case t.RemoveVertex != nil:
+ g.remove(t.RemoveVertex)
+ }
+}
+
+// this structure allows us to decode any object in the json stream for
+// inspection, then re-decode it into a proper struct if needed.
+type streamDecode struct {
+ Type string
+ Map map[string]interface{}
+ JSON []byte
+}
+
+func (s *streamDecode) UnmarshalJSON(d []byte) error {
+ s.JSON = d
+ err := json.Unmarshal(d, &s.Map)
+ if err != nil {
+ return err
+ }
+
+ if t, ok := s.Map["Type"]; ok {
+ s.Type, _ = t.(string)
+ }
+ return nil
+}
+
+// structure for recording the beginning and end of any multi-step
+// transformations. These are informational, and not required to reproduce the
+// graph state.
+type marshalOperation struct {
+ Type string
+ Begin string `json:",omitempty"`
+ End string `json:",omitempty"`
+ Info string `json:",omitempty"`
+}
+
+// decodeGraph decodes a marshalGraph from an encoded graph stream.
+func decodeGraph(r io.Reader) (*marshalGraph, error) {
+ dec := json.NewDecoder(r)
+
+ // a stream should always start with a graph
+ g := &marshalGraph{}
+
+ err := dec.Decode(g)
+ if err != nil {
+ return nil, err
+ }
+
+ // now replay any operations that occurred on the original graph
+ for dec.More() {
+ s := &streamDecode{}
+ err := dec.Decode(s)
+ if err != nil {
+ return g, err
+ }
+
+ // the only Type we're concerned with here is Transform to complete the
+ // Graph
+ if s.Type != typeTransform {
+ continue
+ }
+
+ t := &marshalTransform{}
+ err = json.Unmarshal(s.JSON, t)
+ if err != nil {
+ return g, err
+ }
+ t.Transform(g)
+ }
+ return g, nil
+}
+
+// marshalVertexInfo allows encoding arbitrary information about a single
+// Vertex in the logs. These are accumulated for informational display while
+// rebuilding the graph.
+type marshalVertexInfo struct {
+ Type string
+ Vertex *marshalVertex
+ Info string
+}
+
+func newVertexInfo(infoType string, v Vertex, info string) *marshalVertexInfo {
+ return &marshalVertexInfo{
+ Type: infoType,
+ Vertex: newMarshalVertex(v),
+ Info: info,
+ }
+}
+
+// marshalEdgeInfo allows encoding arbitrary information about a single
+// Edge in the logs. These are accumulated for informational display while
+// rebuilding the graph.
+type marshalEdgeInfo struct {
+ Type string
+ Edge *marshalEdge
+ Info string
+}
+
+func newEdgeInfo(infoType string, e Edge, info string) *marshalEdgeInfo {
+ return &marshalEdgeInfo{
+ Type: infoType,
+ Edge: newMarshalEdge(e),
+ Info: info,
+ }
+}
+
+// JSON2Dot reads a Graph debug log from an io.Reader, and converts the final
+// graph to dot format.
+//
+// TODO: Allow returning the output at a certain point during decode.
+// Encode extra information from the json log into the Dot.
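+//
+// An illustrative sketch (the file name is hypothetical):
+//
+//   f, _ := os.Open("graph-debug.log")
+//   dot, err := JSON2Dot(f)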
+func JSON2Dot(r io.Reader) ([]byte, error) {
+ g, err := decodeGraph(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return g.Dot(nil), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/set.go b/vendor/github.com/hashicorp/terraform/dag/set.go
new file mode 100644
index 00000000..3929c9d0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/set.go
@@ -0,0 +1,109 @@
+package dag
+
+import (
+ "sync"
+)
+
+// Set is a set data structure.
+type Set struct {
+ m map[interface{}]interface{}
+ once sync.Once
+}
+
+// Hashable is the interface used by set to get the hash code of a value.
+// If this isn't given, then the value of the item being added to the set
+// itself is used as the comparison value.
+type Hashable interface {
+ Hashcode() interface{}
+}
+
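+// A hypothetical Hashable implementation:
+//
+//   type node struct{ name string }
+//
+//   func (n *node) Hashcode() interface{} { return n.name }
+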
+// hashcode returns the hashcode used for set elements.
+func hashcode(v interface{}) interface{} {
+ if h, ok := v.(Hashable); ok {
+ return h.Hashcode()
+ }
+
+ return v
+}
+
+// Add adds an item to the set
+func (s *Set) Add(v interface{}) {
+ s.once.Do(s.init)
+ s.m[hashcode(v)] = v
+}
+
+// Delete removes an item from the set.
+func (s *Set) Delete(v interface{}) {
+ s.once.Do(s.init)
+ delete(s.m, hashcode(v))
+}
+
+// Include returns whether a value is in the set.
+func (s *Set) Include(v interface{}) bool {
+ s.once.Do(s.init)
+ _, ok := s.m[hashcode(v)]
+ return ok
+}
+
+// Intersection computes the set intersection with other.
+func (s *Set) Intersection(other *Set) *Set {
+ result := new(Set)
+ if s == nil {
+ return result
+ }
+ if other != nil {
+ for _, v := range s.m {
+ if other.Include(v) {
+ result.Add(v)
+ }
+ }
+ }
+
+ return result
+}
+
+// Difference returns a set with the elements that s has but
+// other doesn't.
+func (s *Set) Difference(other *Set) *Set {
+ result := new(Set)
+ if s != nil {
+ for k, v := range s.m {
+ var ok bool
+ if other != nil {
+ _, ok = other.m[k]
+ }
+ if !ok {
+ result.Add(v)
+ }
+ }
+ }
+
+ return result
+}
+
+// Len is the number of items in the set.
+func (s *Set) Len() int {
+ if s == nil {
+ return 0
+ }
+
+ return len(s.m)
+}
+
+// List returns the list of set elements.
+func (s *Set) List() []interface{} {
+ if s == nil {
+ return nil
+ }
+
+ r := make([]interface{}, 0, len(s.m))
+ for _, v := range s.m {
+ r = append(r, v)
+ }
+
+ return r
+}
+
+func (s *Set) init() {
+ s.m = make(map[interface{}]interface{})
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/tarjan.go b/vendor/github.com/hashicorp/terraform/dag/tarjan.go
new file mode 100644
index 00000000..9d8b25ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/tarjan.go
@@ -0,0 +1,107 @@
+package dag
+
+// StronglyConnected returns the list of strongly connected components
+// within the Graph g. This information is primarily used by this package
+// for cycle detection, but strongly connected components have widespread
+// use.
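+//
+// An illustrative sketch (the int vertices are hypothetical):
+//
+//   var g Graph
+//   g.Add(1)
+//   g.Add(2)
+//   g.Connect(BasicEdge(1, 2))
+//   g.Connect(BasicEdge(2, 1))
+//   StronglyConnected(&g) // => one component containing 1 and 2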
+func StronglyConnected(g *Graph) [][]Vertex {
+ vs := g.Vertices()
+ acct := sccAcct{
+ NextIndex: 1,
+ VertexIndex: make(map[Vertex]int, len(vs)),
+ }
+ for _, v := range vs {
+ // Recurse on any non-visited nodes
+ if acct.VertexIndex[v] == 0 {
+ stronglyConnected(&acct, g, v)
+ }
+ }
+ return acct.SCC
+}
+
+func stronglyConnected(acct *sccAcct, g *Graph, v Vertex) int {
+ // Initial vertex visit
+ index := acct.visit(v)
+ minIdx := index
+
+ for _, raw := range g.DownEdges(v).List() {
+ target := raw.(Vertex)
+ targetIdx := acct.VertexIndex[target]
+
+ // Recurse on successor if not yet visited
+ if targetIdx == 0 {
+ minIdx = min(minIdx, stronglyConnected(acct, g, target))
+ } else if acct.inStack(target) {
+ // Check if the vertex is in the stack
+ minIdx = min(minIdx, targetIdx)
+ }
+ }
+
+ // Pop the strongly connected components off the stack if
+ // this is a root vertex
+ if index == minIdx {
+ var scc []Vertex
+ for {
+ v2 := acct.pop()
+ scc = append(scc, v2)
+ if v2 == v {
+ break
+ }
+ }
+
+ acct.SCC = append(acct.SCC, scc)
+ }
+
+ return minIdx
+}
+
+func min(a, b int) int {
+ if a <= b {
+ return a
+ }
+ return b
+}
+
+// sccAcct is used to pass around accounting information for
+// the StronglyConnected algorithm
+type sccAcct struct {
+ NextIndex int
+ VertexIndex map[Vertex]int
+ Stack []Vertex
+ SCC [][]Vertex
+}
+
+// visit assigns an index and pushes a vertex onto the stack
+func (s *sccAcct) visit(v Vertex) int {
+ idx := s.NextIndex
+ s.VertexIndex[v] = idx
+ s.NextIndex++
+ s.push(v)
+ return idx
+}
+
+// push adds a vertex to the stack
+func (s *sccAcct) push(n Vertex) {
+ s.Stack = append(s.Stack, n)
+}
+
+// pop removes a vertex from the stack
+func (s *sccAcct) pop() Vertex {
+ n := len(s.Stack)
+ if n == 0 {
+ return nil
+ }
+ vertex := s.Stack[n-1]
+ s.Stack = s.Stack[:n-1]
+ return vertex
+}
+
+// inStack checks if a vertex is in the stack
+func (s *sccAcct) inStack(needle Vertex) bool {
+ for _, n := range s.Stack {
+ if n == needle {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go
new file mode 100644
index 00000000..a74f1142
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/walk.go
@@ -0,0 +1,445 @@
+package dag
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/go-multierror"
+)
+
+// Walker is used to walk every vertex of a graph in parallel.
+//
+// A vertex will only be walked when the dependencies of that vertex have
+// been walked. If two vertices can be walked at the same time, they will be.
+//
+// Update can be called to update the graph. This can be called even during
+// a walk, changing vertices/edges mid-walk. This should be done carefully.
+// If a vertex is removed but has already been executed, the result of that
+// execution (any error) is still returned by Wait. Changing or re-adding
+// a vertex that has already executed has no effect. Changing edges of
+// a vertex that has already executed has no effect.
+//
+// Non-parallelism can be enforced by introducing a lock in your callback
+// function. However, the goroutine overhead of a walk will remain.
+// Walker will create V*2 goroutines (one for each vertex, and a dependency
+// waiter for each vertex). In general this should be of no concern unless
+// there are a huge number of vertices.
+//
+// The walk is depth first by default. This can be changed with the Reverse
+// option.
+//
+// A single walker is only valid for one graph walk. After the walk is complete
+// you must construct a new walker to walk again. State for the walk is never
+// deleted in case vertices or edges are changed.
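+//
+// A minimal usage sketch (cb and g are illustrative), mirroring
+// AcyclicGraph.Walk:
+//
+//   w := &Walker{Callback: cb, Reverse: true}
+//   w.Update(g)
+//   err := w.Wait()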
+type Walker struct {
+ // Callback is what is called for each vertex
+ Callback WalkFunc
+
+ // Reverse, if true, causes the source of an edge to depend on a target.
+ // When false (default), the target depends on the source.
+ Reverse bool
+
+ // changeLock must be held to modify any of the fields below. Only Update
+ // should modify these fields. Modifying them outside of Update can cause
+ // serious problems.
+ changeLock sync.Mutex
+ vertices Set
+ edges Set
+ vertexMap map[Vertex]*walkerVertex
+
+ // wait is done when all vertices have executed. It may become "undone"
+ // if new vertices are added.
+ wait sync.WaitGroup
+
+ // errMap contains the errors recorded so far for execution. Reading
+ // and writing should hold errLock.
+ errMap map[Vertex]error
+ errLock sync.Mutex
+}
+
+type walkerVertex struct {
+ // These should only be set once on initialization and never written again.
+ // They are not protected by a lock since they don't need to be since
+ // they are write-once.
+
+ // DoneCh is closed when this vertex has completed execution, regardless
+ // of success.
+ //
+ // CancelCh is closed when the vertex should cancel execution. If execution
+ // is already complete (DoneCh is closed), this has no effect. Otherwise,
+ // execution is cancelled as quickly as possible.
+ DoneCh chan struct{}
+ CancelCh chan struct{}
+
+ // Dependency information. Any changes to any of these fields requires
+ // holding DepsLock.
+ //
+ // DepsCh is sent a single value that denotes whether the upstream deps
+ // were successful (no errors). Any value sent means that the upstream
+ // dependencies are complete. No other values will ever be sent again.
+ //
+ // DepsUpdateCh is closed when there is a new DepsCh set.
+ DepsCh chan bool
+ DepsUpdateCh chan struct{}
+ DepsLock sync.Mutex
+
+ // Below is not safe to read/write in parallel. This behavior is
+ // enforced by changes only happening in Update. Nothing else should
+ // ever modify these.
+ deps map[Vertex]chan struct{}
+ depsCancelCh chan struct{}
+}
+
+// errWalkUpstream is used in the errMap of a walk to note that an upstream
+// dependency failed so this vertex wasn't run. This is not shown in the final
+// user-returned error.
+var errWalkUpstream = errors.New("upstream dependency failed")
+
+// Wait waits for the completion of the walk and returns any errors
+// (in the form of a multierror) that occurred. Update should be called
+// to populate the walk with vertices and edges prior to calling this.
+//
+// Wait will return as soon as all currently known vertices are complete.
+// If you plan on calling Update with more vertices in the future, you
+// should not call Wait until after this is done.
+func (w *Walker) Wait() error {
+ // Wait for completion
+ w.wait.Wait()
+
+ // Grab the error lock
+ w.errLock.Lock()
+ defer w.errLock.Unlock()
+
+ // Build the error
+ var result error
+ for v, err := range w.errMap {
+ if err != nil && err != errWalkUpstream {
+ result = multierror.Append(result, fmt.Errorf(
+ "%s: %s", VertexName(v), err))
+ }
+ }
+
+ return result
+}
+
+// Update updates the currently executing walk with the given graph.
+// This will perform a diff of the vertices and edges and update the walker.
+// Already completed vertices remain completed (including any errors during
+// their execution).
+//
+// This returns immediately once the walker is updated; it does not wait
+// for completion of the walk.
+//
+// Multiple Updates can be called in parallel. Update can be called at any
+// time during a walk.
+func (w *Walker) Update(g *AcyclicGraph) {
+ var v, e *Set
+ if g != nil {
+ v, e = g.vertices, g.edges
+ }
+
+ // Grab the change lock so no more updates happen but also so that
+ // no new vertices are executed during this time since we may be
+ // removing them.
+ w.changeLock.Lock()
+ defer w.changeLock.Unlock()
+
+ // Initialize fields
+ if w.vertexMap == nil {
+ w.vertexMap = make(map[Vertex]*walkerVertex)
+ }
+
+ // Calculate all our sets
+ newEdges := e.Difference(&w.edges)
+ oldEdges := w.edges.Difference(e)
+ newVerts := v.Difference(&w.vertices)
+ oldVerts := w.vertices.Difference(v)
+
+ // Add the new vertices
+ for _, raw := range newVerts.List() {
+ v := raw.(Vertex)
+
+ // Add to the waitgroup so our walk is not done until everything finishes
+ w.wait.Add(1)
+
+ // Add to our own set so we know about it already
+ log.Printf("[DEBUG] dag/walk: added new vertex: %q", VertexName(v))
+ w.vertices.Add(raw)
+
+ // Initialize the vertex info
+ info := &walkerVertex{
+ DoneCh: make(chan struct{}),
+ CancelCh: make(chan struct{}),
+ deps: make(map[Vertex]chan struct{}),
+ }
+
+ // Add it to the map and kick off the walk
+ w.vertexMap[v] = info
+ }
+
+ // Remove the old vertices
+ for _, raw := range oldVerts.List() {
+ v := raw.(Vertex)
+
+ // Get the vertex info so we can cancel it
+ info, ok := w.vertexMap[v]
+ if !ok {
+ // This vertex for some reason was never in our map. This
+ // shouldn't be possible.
+ continue
+ }
+
+ // Cancel the vertex
+ close(info.CancelCh)
+
+ // Delete it out of the map
+ delete(w.vertexMap, v)
+
+ log.Printf("[DEBUG] dag/walk: removed vertex: %q", VertexName(v))
+ w.vertices.Delete(raw)
+ }
+
+ // Add the new edges
+ var changedDeps Set
+ for _, raw := range newEdges.List() {
+ edge := raw.(Edge)
+ waiter, dep := w.edgeParts(edge)
+
+ // Get the info for the waiter
+ waiterInfo, ok := w.vertexMap[waiter]
+ if !ok {
+ // Vertex doesn't exist... shouldn't be possible but ignore.
+ continue
+ }
+
+ // Get the info for the dep
+ depInfo, ok := w.vertexMap[dep]
+ if !ok {
+ // Vertex doesn't exist... shouldn't be possible but ignore.
+ continue
+ }
+
+ // Add the dependency to our waiter
+ waiterInfo.deps[dep] = depInfo.DoneCh
+
+ // Record that the deps changed for this waiter
+ changedDeps.Add(waiter)
+
+ log.Printf(
+ "[DEBUG] dag/walk: added edge: %q waiting on %q",
+ VertexName(waiter), VertexName(dep))
+ w.edges.Add(raw)
+ }
+
+ // Process removed edges
+ for _, raw := range oldEdges.List() {
+ edge := raw.(Edge)
+ waiter, dep := w.edgeParts(edge)
+
+ // Get the info for the waiter
+ waiterInfo, ok := w.vertexMap[waiter]
+ if !ok {
+ // Vertex doesn't exist... shouldn't be possible but ignore.
+ continue
+ }
+
+ // Delete the dependency from the waiter
+ delete(waiterInfo.deps, dep)
+
+ // Record that the deps changed for this waiter
+ changedDeps.Add(waiter)
+
+ log.Printf(
+ "[DEBUG] dag/walk: removed edge: %q waiting on %q",
+ VertexName(waiter), VertexName(dep))
+ w.edges.Delete(raw)
+ }
+
+ // For each vertex with changed dependencies, we need to kick off
+ // a new waiter and notify the vertex of the changes.
+ for _, raw := range changedDeps.List() {
+ v := raw.(Vertex)
+ info, ok := w.vertexMap[v]
+ if !ok {
+ // Vertex doesn't exist... shouldn't be possible but ignore.
+ continue
+ }
+
+ // Create a new done channel
+ doneCh := make(chan bool, 1)
+
+ // Create the channel we close for cancellation
+ cancelCh := make(chan struct{})
+
+ // Build a new deps copy
+ deps := make(map[Vertex]<-chan struct{})
+ for k, v := range info.deps {
+ deps[k] = v
+ }
+
+ // Update the update channel
+ info.DepsLock.Lock()
+ if info.DepsUpdateCh != nil {
+ close(info.DepsUpdateCh)
+ }
+ info.DepsCh = doneCh
+ info.DepsUpdateCh = make(chan struct{})
+ info.DepsLock.Unlock()
+
+ // Cancel the older waiter
+ if info.depsCancelCh != nil {
+ close(info.depsCancelCh)
+ }
+ info.depsCancelCh = cancelCh
+
+ log.Printf(
+ "[DEBUG] dag/walk: dependencies changed for %q, sending new deps",
+ VertexName(v))
+
+ // Start the waiter
+ go w.waitDeps(v, deps, doneCh, cancelCh)
+ }
+
+ // Start all the new vertices. We do this at the end so that all
+ // the edge waiters and changes are setup above.
+ for _, raw := range newVerts.List() {
+ v := raw.(Vertex)
+ go w.walkVertex(v, w.vertexMap[v])
+ }
+}
+
+// edgeParts returns the waiter and the dependency, in that order.
+// The waiter is waiting on the dependency.
+func (w *Walker) edgeParts(e Edge) (Vertex, Vertex) {
+ if w.Reverse {
+ return e.Source(), e.Target()
+ }
+
+ return e.Target(), e.Source()
+}
+
+// walkVertex walks a single vertex, waiting for any dependencies before
+// executing the callback.
+func (w *Walker) walkVertex(v Vertex, info *walkerVertex) {
+ // When we're done executing, lower the waitgroup count
+ defer w.wait.Done()
+
+ // When we're done, always close our done channel
+ defer close(info.DoneCh)
+
+ // Wait for our dependencies. We create a [closed] deps channel so
+ // that we can immediately fall through to load our actual DepsCh.
+ var depsSuccess bool
+ var depsUpdateCh chan struct{}
+ depsCh := make(chan bool, 1)
+ depsCh <- true
+ close(depsCh)
+ for {
+ select {
+ case <-info.CancelCh:
+ // Cancel
+ return
+
+ case depsSuccess = <-depsCh:
+ // Deps complete! Mark as nil to trigger completion handling.
+ depsCh = nil
+
+ case <-depsUpdateCh:
+ // New deps, reloop
+ }
+
+ // Check if we have updated dependencies. This can happen if the
+ // dependencies were satisfied exactly prior to an Update occurring.
+ // In that case, we'd like to take into account new dependencies
+ // if possible.
+ info.DepsLock.Lock()
+ if info.DepsCh != nil {
+ depsCh = info.DepsCh
+ info.DepsCh = nil
+ }
+ if info.DepsUpdateCh != nil {
+ depsUpdateCh = info.DepsUpdateCh
+ }
+ info.DepsLock.Unlock()
+
+ // If we still have no deps channel set, then we're done!
+ if depsCh == nil {
+ break
+ }
+ }
+
+ // If we passed dependencies, we just want to check once more that
+ // we're not cancelled, since this can happen just as dependencies pass.
+ select {
+ case <-info.CancelCh:
+ // Cancelled during an update while dependencies completed.
+ return
+ default:
+ }
+
+ // Run our callback or note that our upstream failed
+ var err error
+ if depsSuccess {
+ log.Printf("[DEBUG] dag/walk: walking %q", VertexName(v))
+ err = w.Callback(v)
+ } else {
+ log.Printf("[DEBUG] dag/walk: upstream errored, not walking %q", VertexName(v))
+ err = errWalkUpstream
+ }
+
+ // Record the error
+ if err != nil {
+ w.errLock.Lock()
+ defer w.errLock.Unlock()
+
+ if w.errMap == nil {
+ w.errMap = make(map[Vertex]error)
+ }
+ w.errMap[v] = err
+ }
+}
+
+func (w *Walker) waitDeps(
+ v Vertex,
+ deps map[Vertex]<-chan struct{},
+ doneCh chan<- bool,
+ cancelCh <-chan struct{}) {
+ // For each dependency given to us, wait for it to complete
+ for dep, depCh := range deps {
+ DepSatisfied:
+ for {
+ select {
+ case <-depCh:
+ // Dependency satisfied!
+ break DepSatisfied
+
+ case <-cancelCh:
+ // Wait cancelled. Note that we didn't satisfy dependencies
+ // so that anything waiting on us also doesn't run.
+ doneCh <- false
+ return
+
+ case <-time.After(time.Second * 5):
+ log.Printf("[DEBUG] dag/walk: vertex %q, waiting for: %q",
+ VertexName(v), VertexName(dep))
+ }
+ }
+ }
+
+ // Dependencies satisfied! We need to check if any errored
+ w.errLock.Lock()
+ defer w.errLock.Unlock()
+ for dep := range deps {
+ if w.errMap[dep] != nil {
+ // One of our dependencies failed, so return false
+ doneCh <- false
+ return
+ }
+ }
+
+ // All dependencies satisfied and successful
+ doneCh <- true
+}
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/expand.go b/vendor/github.com/hashicorp/terraform/flatmap/expand.go
new file mode 100644
index 00000000..2bfb3fe3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/flatmap/expand.go
@@ -0,0 +1,147 @@
+package flatmap
+
+import (
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/hil"
+)
+
+// Expand takes a map and a key (prefix) and expands that value into
+// a more complex structure. This is the reverse of the Flatten operation.
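+//
+// An illustrative sketch:
+//
+//   m := map[string]string{"l.#": "2", "l.0": "a", "l.1": "b"}
+//   Expand(m, "l") // => []interface{}{"a", "b"}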
+func Expand(m map[string]string, key string) interface{} {
+ // If the key is exactly a key in the map, just return it
+ if v, ok := m[key]; ok {
+ if v == "true" {
+ return true
+ } else if v == "false" {
+ return false
+ }
+
+ return v
+ }
+
+ // Check if the key is an array, and if so, expand the array
+ if v, ok := m[key+".#"]; ok {
+ // If the count of the key is unknown, then just put the unknown
+ // value in the value itself. This will be detected by Terraform
+ // core later.
+ if v == hil.UnknownValue {
+ return v
+ }
+
+ return expandArray(m, key)
+ }
+
+ // Check if this is a prefix in the map
+ prefix := key + "."
+ for k := range m {
+ if strings.HasPrefix(k, prefix) {
+ return expandMap(m, prefix)
+ }
+ }
+
+ return nil
+}
+
+func expandArray(m map[string]string, prefix string) []interface{} {
+ num, err := strconv.ParseInt(m[prefix+".#"], 0, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ // If the number of elements in this array is 0, then return an
+ // empty slice as there is nothing to expand. Trying to expand it
+ // anyway could lead to crashes as any child maps, arrays or sets
+ // that no longer exist are still shown as empty with a count of 0.
+ if num == 0 {
+ return []interface{}{}
+ }
+
+ // The Schema "Set" type stores its values in an array format, but
+ // using numeric hash values instead of ordinal keys. Take the set
+ // of keys regardless of value, and expand them in numeric order.
+ // See GH-11042 for more details.
+ keySet := map[int]bool{}
+ computed := map[string]bool{}
+ for k := range m {
+ if !strings.HasPrefix(k, prefix+".") {
+ continue
+ }
+
+ key := k[len(prefix)+1:]
+ idx := strings.Index(key, ".")
+ if idx != -1 {
+ key = key[:idx]
+ }
+
+ // skip the count value
+ if key == "#" {
+ continue
+ }
+
+ // strip the computed flag if there is one
+ if strings.HasPrefix(key, "~") {
+ key = key[1:]
+ computed[key] = true
+ }
+
+ k, err := strconv.Atoi(key)
+ if err != nil {
+ panic(err)
+ }
+ keySet[int(k)] = true
+ }
+
+ keysList := make([]int, 0, num)
+ for key := range keySet {
+ keysList = append(keysList, key)
+ }
+ sort.Ints(keysList)
+
+ result := make([]interface{}, num)
+ for i, key := range keysList {
+ keyString := strconv.Itoa(key)
+ if computed[keyString] {
+ keyString = "~" + keyString
+ }
+ result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString))
+ }
+
+ return result
+}
+
+func expandMap(m map[string]string, prefix string) map[string]interface{} {
+ // Submaps may not have a '%' key, so we can't count on this value being
+ // here. If we don't have a count, just proceed as if we have a map.
+ if count, ok := m[prefix+"%"]; ok && count == "0" {
+ return map[string]interface{}{}
+ }
+
+ result := make(map[string]interface{})
+ for k := range m {
+ if !strings.HasPrefix(k, prefix) {
+ continue
+ }
+
+ key := k[len(prefix):]
+ idx := strings.Index(key, ".")
+ if idx != -1 {
+ key = key[:idx]
+ }
+ if _, ok := result[key]; ok {
+ continue
+ }
+
+ // skip the map count value
+ if key == "%" {
+ continue
+ }
+
+ result[key] = Expand(m, k[:len(prefix)+len(key)])
+ }
+
+ return result
+}
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/flatten.go b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go
new file mode 100644
index 00000000..9ff6e426
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go
@@ -0,0 +1,71 @@
+package flatmap
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Flatten takes a structure and turns into a flat map[string]string.
+//
+// Within the "thing" parameter, only primitive values are allowed. Structs are
+// not supported. Therefore, it can only be slices, maps, primitives, and
+// any combination of those together.
+//
+// See the tests for examples of what inputs are turned into.
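+//
+// An illustrative sketch:
+//
+//   Flatten(map[string]interface{}{"l": []interface{}{"a", "b"}})
+//   // => Map{"l.#": "2", "l.0": "a", "l.1": "b"}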
+func Flatten(thing map[string]interface{}) Map {
+ result := make(map[string]string)
+
+ for k, raw := range thing {
+ flatten(result, k, reflect.ValueOf(raw))
+ }
+
+ return Map(result)
+}
+
+func flatten(result map[string]string, prefix string, v reflect.Value) {
+ if v.Kind() == reflect.Interface {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Bool:
+ if v.Bool() {
+ result[prefix] = "true"
+ } else {
+ result[prefix] = "false"
+ }
+ case reflect.Int:
+ result[prefix] = fmt.Sprintf("%d", v.Int())
+ case reflect.Map:
+ flattenMap(result, prefix, v)
+ case reflect.Slice:
+ flattenSlice(result, prefix, v)
+ case reflect.String:
+ result[prefix] = v.String()
+ default:
+ panic(fmt.Sprintf("Unknown: %s", v))
+ }
+}
+
+func flattenMap(result map[string]string, prefix string, v reflect.Value) {
+ for _, k := range v.MapKeys() {
+ if k.Kind() == reflect.Interface {
+ k = k.Elem()
+ }
+
+ if k.Kind() != reflect.String {
+ panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k))
+ }
+
+ flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k))
+ }
+}
+
+func flattenSlice(result map[string]string, prefix string, v reflect.Value) {
+ prefix = prefix + "."
+
+ result[prefix+"#"] = fmt.Sprintf("%d", v.Len())
+ for i := 0; i < v.Len(); i++ {
+ flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i))
+ }
+}
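+
+// A minimal usage sketch (illustrative values): Flatten produces the flat
+// key format that Expand understands, emitting a "#" count key per slice.
+//
+//    f := Flatten(map[string]interface{}{
+//        "port": []interface{}{80, 443},
+//        "tags": map[string]interface{}{"env": "prod"},
+//    })
+//    // f == Map{"port.#": "2", "port.0": "80", "port.1": "443", "tags.env": "prod"}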
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/map.go b/vendor/github.com/hashicorp/terraform/flatmap/map.go
new file mode 100644
index 00000000..46b72c40
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/flatmap/map.go
@@ -0,0 +1,82 @@
+package flatmap
+
+import (
+ "strings"
+)
+
+// Map is a wrapper around map[string]string that provides some helpers
+// above it that assume the map is in the format that flatmap expects
+// (the result of Flatten).
+//
+// All modifying functions such as Delete are done in-place unless
+// otherwise noted.
+type Map map[string]string
+
+// Contains returns true if the map contains the given key.
+func (m Map) Contains(key string) bool {
+ for _, k := range m.Keys() {
+ if k == key {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Delete deletes a key out of the map with the given prefix.
+func (m Map) Delete(prefix string) {
+ for k := range m {
+ match := k == prefix
+ if !match {
+ if !strings.HasPrefix(k, prefix) {
+ continue
+ }
+
+ if k[len(prefix):len(prefix)+1] != "." {
+ continue
+ }
+ }
+
+ delete(m, k)
+ }
+}
+
+// Keys returns all of the top-level keys in this map.
+func (m Map) Keys() []string {
+ ks := make(map[string]struct{})
+ for k := range m {
+ idx := strings.Index(k, ".")
+ if idx == -1 {
+ idx = len(k)
+ }
+
+ ks[k[:idx]] = struct{}{}
+ }
+
+ result := make([]string, 0, len(ks))
+ for k := range ks {
+ result = append(result, k)
+ }
+
+ return result
+}
+
+// Merge merges the contents of the other Map into this one.
+//
+// This merge is smarter than a simple map iteration because it
+// will fully replace arrays and other complex structures that
+// are present in this map with the other map's. For example, if
+// this map has a 3 element "foo" list, and m2 has a 2 element "foo"
+// list, then the result will be that m has a 2 element "foo"
+// list.
+func (m Map) Merge(m2 Map) {
+ for _, prefix := range m2.Keys() {
+ m.Delete(prefix)
+
+ for k, v := range m2 {
+ if strings.HasPrefix(k, prefix) {
+ m[k] = v
+ }
+ }
+ }
+}
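+
+// A minimal sketch of the replace-on-merge behavior described above
+// (illustrative values):
+//
+//    m := Map{"foo.#": "3", "foo.0": "a", "foo.1": "b", "foo.2": "c"}
+//    m.Merge(Map{"foo.#": "2", "foo.0": "x", "foo.1": "y"})
+//    // m == Map{"foo.#": "2", "foo.0": "x", "foo.1": "y"}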
diff --git a/vendor/github.com/hashicorp/terraform/helper/README.md b/vendor/github.com/hashicorp/terraform/helper/README.md
new file mode 100644
index 00000000..d0fee068
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/README.md
@@ -0,0 +1,7 @@
+# Helper Libraries
+
+This folder contains helper libraries for Terraform plugins. A running
+joke is that this is the "Terraform standard library" for plugins. The
+goal of the packages in this directory is to provide high-level helpers
+that make it easier to implement the various aspects of writing a
+Terraform plugin.
diff --git a/vendor/github.com/hashicorp/terraform/helper/config/decode.go b/vendor/github.com/hashicorp/terraform/helper/config/decode.go
new file mode 100644
index 00000000..f470c9b4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/config/decode.go
@@ -0,0 +1,28 @@
+package config
+
+import (
+ "github.com/mitchellh/mapstructure"
+)
+
+func Decode(target interface{}, raws ...interface{}) (*mapstructure.Metadata, error) {
+ var md mapstructure.Metadata
+ decoderConfig := &mapstructure.DecoderConfig{
+ Metadata: &md,
+ Result: target,
+ WeaklyTypedInput: true,
+ }
+
+ decoder, err := mapstructure.NewDecoder(decoderConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, raw := range raws {
+ err := decoder.Decode(raw)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &md, nil
+}
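+
+// A minimal usage sketch (the struct and inputs are hypothetical): because
+// WeaklyTypedInput is set, string values such as "8080" are coerced into
+// the target field types.
+//
+//    type serverConfig struct {
+//        Name string
+//        Port int
+//    }
+//
+//    var c serverConfig
+//    md, err := Decode(&c, map[string]interface{}{"name": "web", "port": "8080"})
+//    // c == serverConfig{Name: "web", Port: 8080}; md.Keys lists the decoded keys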
diff --git a/vendor/github.com/hashicorp/terraform/helper/config/validator.go b/vendor/github.com/hashicorp/terraform/helper/config/validator.go
new file mode 100644
index 00000000..1a6e023b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/config/validator.go
@@ -0,0 +1,214 @@
+package config
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform/flatmap"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Validator helps you validate the configuration of your resource,
+// resource provider, etc.
+//
+// At the most basic level, set the Required and Optional lists to be
+// specifiers of keys that are required or optional. If a key shows up
+// that isn't in one of these two lists, then an error is generated.
+//
+// The "specifiers" allowed in this is a fairly rich syntax to help
+// describe the format of your configuration:
+//
+// * Basic keys are just strings. For example: "foo" will match the
+// "foo" key.
+//
+// * Nested structure keys can be matched by doing
+// "listener.*.foo". This will verify that there is at least one
+// listener element that has the "foo" key set.
+//
+// * The existence of a nested structure can be checked by simply
+// doing "listener.*" which will verify that there is at least
+// one element in the "listener" structure. This is NOT
+// validating that "listener" is an array. It is validating
+// that it is a nested structure in the configuration.
+//
+type Validator struct {
+ Required []string
+ Optional []string
+}
+
+func (v *Validator) Validate(
+ c *terraform.ResourceConfig) (ws []string, es []error) {
+ // Flatten the configuration so it is easier to reason about
+ flat := flatmap.Flatten(c.Raw)
+
+ keySet := make(map[string]validatorKey)
+ for i, vs := range [][]string{v.Required, v.Optional} {
+ req := i == 0
+ for _, k := range vs {
+ vk, err := newValidatorKey(k, req)
+ if err != nil {
+ es = append(es, err)
+ continue
+ }
+
+ keySet[k] = vk
+ }
+ }
+
+ purged := make([]string, 0)
+ for _, kv := range keySet {
+ p, w, e := kv.Validate(flat)
+ if len(w) > 0 {
+ ws = append(ws, w...)
+ }
+ if len(e) > 0 {
+ es = append(es, e...)
+ }
+
+ purged = append(purged, p...)
+ }
+
+ // Delete all the keys we processed in order to find
+ // the unknown keys.
+ for _, p := range purged {
+ delete(flat, p)
+ }
+
+ // The rest are unknown
+ for k := range flat {
+ es = append(es, fmt.Errorf("Unknown configuration: %s", k))
+ }
+
+ return
+}
+
+type validatorKey interface {
+ // Validate validates the given configuration and returns viewed keys,
+ // warnings, and errors.
+ Validate(map[string]string) ([]string, []string, []error)
+}
+
+func newValidatorKey(k string, req bool) (validatorKey, error) {
+ var result validatorKey
+
+ parts := strings.Split(k, ".")
+ if len(parts) > 1 && parts[1] == "*" {
+ result = &nestedValidatorKey{
+ Parts: parts,
+ Required: req,
+ }
+ } else {
+ result = &basicValidatorKey{
+ Key: k,
+ Required: req,
+ }
+ }
+
+ return result, nil
+}
+
+// basicValidatorKey validates keys that are basic such as "foo"
+type basicValidatorKey struct {
+ Key string
+ Required bool
+}
+
+func (v *basicValidatorKey) Validate(
+ m map[string]string) ([]string, []string, []error) {
+ for k := range m {
+ // If we have the exact key, it's a match
+ if k == v.Key {
+ return []string{k}, nil, nil
+ }
+ }
+
+ if !v.Required {
+ return nil, nil, nil
+ }
+
+ return nil, nil, []error{fmt.Errorf(
+ "Key not found: %s", v.Key)}
+}
+
+type nestedValidatorKey struct {
+ Parts []string
+ Required bool
+}
+
+func (v *nestedValidatorKey) validate(
+ m map[string]string,
+ prefix string,
+ offset int) ([]string, []string, []error) {
+ if offset >= len(v.Parts) {
+ // We're at the end. Look for a specific key.
+ v2 := &basicValidatorKey{Key: prefix, Required: v.Required}
+ return v2.Validate(m)
+ }
+
+ current := v.Parts[offset]
+
+ // If we're at offset 0, special case to start at the next one.
+ if offset == 0 {
+ return v.validate(m, current, offset+1)
+ }
+
+ // Determine if we're doing a "for all" or a specific key
+ if current != "*" {
+ // We're looking at a specific key, continue on.
+ return v.validate(m, prefix+"."+current, offset+1)
+ }
+
+ // We're doing a "for all", so we loop over.
+ countStr, ok := m[prefix+".#"]
+ if !ok {
+ if !v.Required {
+ // It wasn't required, so it's no problem.
+ return nil, nil, nil
+ }
+
+ return nil, nil, []error{fmt.Errorf(
+ "Key not found: %s", prefix)}
+ }
+
+ count, err := strconv.ParseInt(countStr, 0, 0)
+ if err != nil {
+ // This shouldn't happen if flatmap works properly
+ panic("invalid flatmap array")
+ }
+
+ var e []error
+ var w []string
+ u := make([]string, 1, count+1)
+ u[0] = prefix + ".#"
+ for i := 0; i < int(count); i++ {
+ prefix := fmt.Sprintf("%s.%d", prefix, i)
+
+ // Mark that we saw this specific key
+ u = append(u, prefix)
+
+ // Mark all prefixes of this
+ for k := range m {
+ if !strings.HasPrefix(k, prefix+".") {
+ continue
+ }
+ u = append(u, k)
+ }
+
+ // If we have more parts, then validate deeper
+ if offset+1 < len(v.Parts) {
+ u2, w2, e2 := v.validate(m, prefix, offset+1)
+
+ u = append(u, u2...)
+ w = append(w, w2...)
+ e = append(e, e2...)
+ }
+ }
+
+ return u, w, e
+}
+
+func (v *nestedValidatorKey) Validate(
+ m map[string]string) ([]string, []string, []error) {
+ return v.validate(m, "", 0)
+}
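+
+// A minimal usage sketch (key names are hypothetical; cfg is assumed to be
+// a *terraform.ResourceConfig obtained elsewhere):
+//
+//    v := &Validator{
+//        Required: []string{"name", "listener.*.port"},
+//        Optional: []string{"description"},
+//    }
+//    ws, es := v.Validate(cfg)
+//    // ws holds warnings; es holds errors such as missing or unknown keys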
diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go b/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go
new file mode 100644
index 00000000..18b8837c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go
@@ -0,0 +1,154 @@
+// Package experiment contains helper functions for tracking experimental
+// features throughout Terraform.
+//
+// This package should be used for creating, enabling, querying, and deleting
+// experimental features. By unifying all of that onto a single interface,
+// we can have the Go compiler help us by enforcing every place we touch
+// an experimental feature.
+//
+// To create a new experiment:
+//
+// 1. Add the experiment to the global vars list below, prefixed with X_
+//
+// 2. Add the experiment variable to the All list in the init() function
+//
+// 3. Use it!
+//
+// To remove an experiment:
+//
+// 1. Delete the experiment global var.
+//
+// 2. Try to compile and fix all the places where the var was referenced.
+//
+// To use an experiment:
+//
+// 1. Use Flag() if you want the experiment to be available from the CLI.
+//
+// 2. Use Enabled() to check whether it is enabled.
+//
+// As a general user:
+//
+// 1. The `-Xexperiment-name` CLI flag.
+// 2. The `TF_X_<experiment-name>` env var.
+// 3. The `TF_X_FORCE` env var can be set to force an experimental feature
+// without human verification.
+//
+package experiment
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// The available experiments are listed below. Any experiment used anywhere
+// in Terraform should be defined here. Keeping them all within the
+// experiment package forces a single point of definition and use, which
+// allows the compiler to enforce references and makes it easy to remove
+// the features.
+var (
+ // Shadow graph. This is already on by default. Disabling it will be
+ // allowed for a while so that it doesn't block operations.
+ X_shadow = newBasicID("shadow", "SHADOW", false)
+)
+
+// Global variables this package uses because we are a package
+// with global state.
+var (
+ // All is the list of all experiments. Do not modify this.
+ All []ID
+
+ // enabled keeps track of what flags have been enabled
+ enabled map[string]bool
+ enabledLock sync.Mutex
+
+ // Hidden "experiment" that forces all others to be on without verification
+ x_force = newBasicID("force", "FORCE", false)
+)
+
+func init() {
+ // The list of all experiments, update this when an experiment is added.
+ All = []ID{
+ X_shadow,
+ x_force,
+ }
+
+ // Load
+ reload()
+}
+
+// reload resets and reloads the global state. It is called by init and
+// used by tests to reload the state.
+func reload() {
+ // Initialize
+ enabledLock.Lock()
+ enabled = make(map[string]bool)
+ enabledLock.Unlock()
+
+ // Set defaults and check env vars
+ for _, id := range All {
+ // Get the default value
+ def := id.Default()
+
+ // An env var overrides the default; any value other than "0" enables it
+ key := fmt.Sprintf("TF_X_%s", strings.ToUpper(id.Env()))
+ if v := os.Getenv(key); v != "" {
+ def = v != "0"
+ }
+
+ // Set the default
+ SetEnabled(id, def)
+ }
+}
+
+// Enabled returns whether an experiment has been enabled or not.
+func Enabled(id ID) bool {
+ enabledLock.Lock()
+ defer enabledLock.Unlock()
+ return enabled[id.Flag()]
+}
+
+// SetEnabled sets an experiment to enabled/disabled. Please check with
+// the experiment docs for when calling this actually affects the experiment.
+func SetEnabled(id ID, v bool) {
+ enabledLock.Lock()
+ defer enabledLock.Unlock()
+ enabled[id.Flag()] = v
+}
+
+// Force returns true if the -Xforce or TF_X_FORCE flag is present, which
+// advises users of this package not to verify with the user that they want
+// experimental behavior and to just continue with it.
+func Force() bool {
+ return Enabled(x_force)
+}
+
+// Flag configures the given FlagSet with the flags to configure
+// all active experiments.
+func Flag(fs *flag.FlagSet) {
+ for _, id := range All {
+ desc := id.Flag()
+ key := fmt.Sprintf("X%s", id.Flag())
+ fs.Var(&idValue{X: id}, key, desc)
+ }
+}
+
+// idValue implements flag.Value for setting the enabled/disabled state
+// of an experiment from the CLI.
+type idValue struct {
+ X ID
+}
+
+func (v *idValue) IsBoolFlag() bool { return true }
+func (v *idValue) String() string { return strconv.FormatBool(Enabled(v.X)) }
+func (v *idValue) Set(raw string) error {
+ b, err := strconv.ParseBool(raw)
+ if err == nil {
+ SetEnabled(v.X, b)
+ }
+
+ return err
+}
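+
+// A minimal usage sketch: experiments are wired into a CLI with Flag and
+// queried with Enabled (the flag set below is illustrative).
+//
+//    fs := flag.NewFlagSet("terraform", flag.ContinueOnError)
+//    Flag(fs)
+//    fs.Parse([]string{"-Xshadow=false"})
+//
+//    if Enabled(X_shadow) {
+//        // shadow graph behavior
+//    }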
diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/id.go b/vendor/github.com/hashicorp/terraform/helper/experiment/id.go
new file mode 100644
index 00000000..8e2f7073
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/experiment/id.go
@@ -0,0 +1,34 @@
+package experiment
+
+// ID represents an experimental feature.
+//
+// The global vars defined on this package should be used as ID values.
+// This interface is purposely not implementable outside of this package
+// so that we can rely on the Go compiler to enforce all experiment references.
+type ID interface {
+ Env() string
+ Flag() string
+ Default() bool
+
+ unexported() // So the ID can't be implemented externally.
+}
+
+// basicID implements ID.
+type basicID struct {
+ EnvValue string
+ FlagValue string
+ DefaultValue bool
+}
+
+func newBasicID(flag, env string, def bool) ID {
+ return &basicID{
+ EnvValue: env,
+ FlagValue: flag,
+ DefaultValue: def,
+ }
+}
+
+func (id *basicID) Env() string { return id.EnvValue }
+func (id *basicID) Flag() string { return id.FlagValue }
+func (id *basicID) Default() bool { return id.DefaultValue }
+func (id *basicID) unexported() {}
diff --git a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
new file mode 100644
index 00000000..64d8263e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
@@ -0,0 +1,22 @@
+package hashcode
+
+import (
+ "hash/crc32"
+)
+
+// String hashes a string to a hashcode.
+//
+// crc32 returns a uint32, but for our use we need
+// a non-negative integer. Here we cast to an int
+// and invert it if the result is negative.
+func String(s string) int {
+ v := int(crc32.ChecksumIEEE([]byte(s)))
+ if v >= 0 {
+ return v
+ }
+ if -v >= 0 {
+ return -v
+ }
+ // v == MinInt
+ return 0
+}
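+
+// A minimal usage sketch: the result is stable for a given input and always
+// non-negative, which makes it usable as a set hash.
+//
+//    h := String("us-west-2a")
+//    // h >= 0, and repeated calls with the same input return the same value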
diff --git a/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go
new file mode 100644
index 00000000..67be1df1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go
@@ -0,0 +1,41 @@
+package hilmapstructure
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/mitchellh/mapstructure"
+)
+
+var hilMapstructureDecodeHookEmptySlice []interface{}
+var hilMapstructureDecodeHookStringSlice []string
+var hilMapstructureDecodeHookEmptyMap map[string]interface{}
+
+// WeakDecode behaves in the same way as mapstructure.WeakDecode but has a
+// DecodeHook which defeats the backward compatibility mode of mapstructure
+// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This
+// allows us to use WeakDecode (desirable), but not fail on empty lists.
+func WeakDecode(m interface{}, rawVal interface{}) error {
+ config := &mapstructure.DecoderConfig{
+ DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) {
+ sliceType := reflect.TypeOf(hilMapstructureDecodeHookEmptySlice)
+ stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice)
+ mapType := reflect.TypeOf(hilMapstructureDecodeHookEmptyMap)
+
+ if (source == sliceType || source == stringSliceType) && target == mapType {
+ return nil, fmt.Errorf("Cannot convert a []interface{} into a map[string]interface{}")
+ }
+
+ return val, nil
+ },
+ WeaklyTypedInput: true,
+ Result: rawVal,
+ }
+
+ decoder, err := mapstructure.NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(m)
+}
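+
+// A minimal usage sketch: decoding an empty list into a map fails instead
+// of silently yielding an empty map, while plain map inputs decode as usual.
+//
+//    var out map[string]interface{}
+//    err := WeakDecode([]interface{}{}, &out)                 // returns an error
+//    err = WeakDecode(map[string]interface{}{"a": "1"}, &out) // out == {"a": "1"}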
diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
new file mode 100644
index 00000000..433cd77d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
@@ -0,0 +1,100 @@
+package logging
+
+import (
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+ "syscall"
+
+ "github.com/hashicorp/logutils"
+)
+
+// These are the environment variables that determine if we log, and if
+// we do, whether the log should go to a file.
+const (
+ EnvLog = "TF_LOG" // Set to a level or any non-empty value
+ EnvLogFile = "TF_LOG_PATH" // Set to a file
+)
+
+var validLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"}
+
+// LogOutput determines where we should send logs (if anywhere) and the log level.
+func LogOutput() (logOutput io.Writer, err error) {
+ logOutput = ioutil.Discard
+
+ logLevel := LogLevel()
+ if logLevel == "" {
+ return
+ }
+
+ logOutput = os.Stderr
+ if logPath := os.Getenv(EnvLogFile); logPath != "" {
+ var err error
+ logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // This was the default since the beginning
+ logOutput = &logutils.LevelFilter{
+ Levels: validLevels,
+ MinLevel: logutils.LogLevel(logLevel),
+ Writer: logOutput,
+ }
+
+ return
+}
+
+// SetOutput checks for a log destination with LogOutput, and calls
+// log.SetOutput with the result. If LogOutput returns nil, SetOutput uses
+// ioutil.Discard. Any error from LogOutput is fatal.
+func SetOutput() {
+ out, err := LogOutput()
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ if out == nil {
+ out = ioutil.Discard
+ }
+
+ log.SetOutput(out)
+}
+
+// LogLevel returns the current log level string based on the environment vars.
+func LogLevel() string {
+ envLevel := os.Getenv(EnvLog)
+ if envLevel == "" {
+ return ""
+ }
+
+ logLevel := "TRACE"
+ if isValidLogLevel(envLevel) {
+ // allow following for better ux: info, Info or INFO
+ logLevel = strings.ToUpper(envLevel)
+ } else {
+ log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. Valid levels are: %+v",
+ envLevel, validLevels)
+ }
+
+ return logLevel
+}
+
+// IsDebugOrHigher returns whether or not the current log level is debug or trace
+func IsDebugOrHigher() bool {
+ level := LogLevel()
+ return level == "DEBUG" || level == "TRACE"
+}
+
+func isValidLogLevel(level string) bool {
+ for _, l := range validLevels {
+ if strings.ToUpper(level) == string(l) {
+ return true
+ }
+ }
+
+ return false
+}
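+
+// A minimal usage sketch: both environment variables are read when logging
+// is set up.
+//
+//    os.Setenv("TF_LOG", "debug")              // case-insensitive; normalized to DEBUG
+//    os.Setenv("TF_LOG_PATH", "terraform.log")
+//    SetOutput()                               // output is now filtered and appended to the file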
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/error.go b/vendor/github.com/hashicorp/terraform/helper/resource/error.go
new file mode 100644
index 00000000..7ee21614
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/error.go
@@ -0,0 +1,79 @@
+package resource
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+type NotFoundError struct {
+ LastError error
+ LastRequest interface{}
+ LastResponse interface{}
+ Message string
+ Retries int
+}
+
+func (e *NotFoundError) Error() string {
+ if e.Message != "" {
+ return e.Message
+ }
+
+ if e.Retries > 0 {
+ return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries)
+ }
+
+ return "couldn't find resource"
+}
+
+// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending
+type UnexpectedStateError struct {
+ LastError error
+ State string
+ ExpectedState []string
+}
+
+func (e *UnexpectedStateError) Error() string {
+ return fmt.Sprintf(
+ "unexpected state '%s', wanted target '%s'. last error: %s",
+ e.State,
+ strings.Join(e.ExpectedState, ", "),
+ e.LastError,
+ )
+}
+
+// TimeoutError is returned when WaitForState times out
+type TimeoutError struct {
+ LastError error
+ LastState string
+ Timeout time.Duration
+ ExpectedState []string
+}
+
+func (e *TimeoutError) Error() string {
+ expectedState := "resource to be gone"
+ if len(e.ExpectedState) > 0 {
+ expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", "))
+ }
+
+ extraInfo := make([]string, 0)
+ if e.LastState != "" {
+ extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState))
+ }
+ if e.Timeout > 0 {
+ extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String()))
+ }
+
+ suffix := ""
+ if len(extraInfo) > 0 {
+ suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", "))
+ }
+
+ if e.LastError != nil {
+ return fmt.Sprintf("timeout while waiting for %s%s: %s",
+ expectedState, suffix, e.LastError)
+ }
+
+ return fmt.Sprintf("timeout while waiting for %s%s",
+ expectedState, suffix)
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/id.go b/vendor/github.com/hashicorp/terraform/helper/resource/id.go
new file mode 100644
index 00000000..629582b3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/id.go
@@ -0,0 +1,39 @@
+package resource
+
+import (
+ "crypto/rand"
+ "fmt"
+ "math/big"
+ "sync"
+)
+
+const UniqueIdPrefix = `terraform-`
+
+// idCounter is a randomly seeded monotonic counter for generating ordered
+// unique ids. It uses a big.Int so we can easily increment a long numeric
+// string. The max possible hex value here with 12 random bytes is
+// "01000000000000000000000000", so there's no chance of rollover during
+// operation.
+var idMutex sync.Mutex
+var idCounter = big.NewInt(0).SetBytes(randomBytes(12))
+
+// UniqueId is a helper for a resource to generate a unique identifier
+// with the default prefix.
+func UniqueId() string {
+ return PrefixedUniqueId(UniqueIdPrefix)
+}
+
+// PrefixedUniqueId is a helper for a resource to generate a unique
+// identifier with the given prefix.
+//
+// After the prefix, the ID consists of an incrementing 26-digit hex value
+// (to match the length of the previous timestamp output).
+func PrefixedUniqueId(prefix string) string {
+ idMutex.Lock()
+ defer idMutex.Unlock()
+ return fmt.Sprintf("%s%026x", prefix, idCounter.Add(idCounter, big.NewInt(1)))
+}
+
+func randomBytes(n int) []byte {
+ b := make([]byte, n)
+ rand.Read(b)
+ return b
+}
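+
+// A minimal usage sketch: ids share the process-wide counter, so later
+// calls always sort after earlier ones.
+//
+//    a := UniqueId()              // "terraform-" + 26 hex digits
+//    b := PrefixedUniqueId("db-") // "db-" + 26 hex digits, suffix greater than a's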
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/map.go b/vendor/github.com/hashicorp/terraform/helper/resource/map.go
new file mode 100644
index 00000000..a465136f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/map.go
@@ -0,0 +1,140 @@
+package resource
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Map is a map of resources that are supported, and provides helpers for
+// more easily implementing a ResourceProvider.
+type Map struct {
+ Mapping map[string]Resource
+}
+
+func (m *Map) Validate(
+ t string, c *terraform.ResourceConfig) ([]string, []error) {
+ r, ok := m.Mapping[t]
+ if !ok {
+ return nil, []error{fmt.Errorf("Unknown resource type: %s", t)}
+ }
+
+ // If there is no validator set, then it is valid
+ if r.ConfigValidator == nil {
+ return nil, nil
+ }
+
+ return r.ConfigValidator.Validate(c)
+}
+
+// Apply performs a create or update depending on the diff, and calls
+// the proper function on the matching Resource.
+func (m *Map) Apply(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ d *terraform.InstanceDiff,
+ meta interface{}) (*terraform.InstanceState, error) {
+ r, ok := m.Mapping[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
+ }
+
+ if d.Destroy || d.RequiresNew() {
+ if s.ID != "" {
+ // Destroy the resource if it is created
+ err := r.Destroy(s, meta)
+ if err != nil {
+ return s, err
+ }
+
+ s.ID = ""
+ }
+
+ // If we're only destroying, and not creating, then return now.
+ // Otherwise, we continue so that we can create a new resource.
+ if !d.RequiresNew() {
+ return nil, nil
+ }
+ }
+
+ var result *terraform.InstanceState
+ var err error
+ if s.ID == "" {
+ result, err = r.Create(s, d, meta)
+ } else {
+ if r.Update == nil {
+ return s, fmt.Errorf(
+ "Resource type '%s' doesn't support update",
+ info.Type)
+ }
+
+ result, err = r.Update(s, d, meta)
+ }
+ if result != nil {
+ if result.Attributes == nil {
+ result.Attributes = make(map[string]string)
+ }
+
+ result.Attributes["id"] = result.ID
+ }
+
+ return result, err
+}
+
+// Diff performs a diff on the proper resource type.
+func (m *Map) Diff(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig,
+ meta interface{}) (*terraform.InstanceDiff, error) {
+ r, ok := m.Mapping[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
+ }
+
+ return r.Diff(s, c, meta)
+}
+
+// Refresh performs a Refresh on the proper resource type.
+//
+// Refresh on the Resource won't be called if the state represents a
+// non-created resource (ID is blank).
+//
+// An error is returned if the resource isn't registered.
+func (m *Map) Refresh(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ meta interface{}) (*terraform.InstanceState, error) {
+ // If the resource isn't created, don't refresh.
+ if s.ID == "" {
+ return s, nil
+ }
+
+ r, ok := m.Mapping[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
+ }
+
+ return r.Refresh(s, meta)
+}
+
+// Resources returns all the resources that are supported by this
+// resource map and can be used to satisfy the Resources method of
+// a ResourceProvider.
+func (m *Map) Resources() []terraform.ResourceType {
+ ks := make([]string, 0, len(m.Mapping))
+ for k := range m.Mapping {
+ ks = append(ks, k)
+ }
+ sort.Strings(ks)
+
+ rs := make([]terraform.ResourceType, 0, len(m.Mapping))
+ for _, k := range ks {
+ rs = append(rs, terraform.ResourceType{
+ Name: k,
+ })
+ }
+
+ return rs
+}
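+
+// A minimal usage sketch ("example_server" and serverResource are
+// hypothetical): a Map ties resource type names to implementations so a
+// ResourceProvider can delegate to it.
+//
+//    m := &Map{
+//        Mapping: map[string]Resource{
+//            "example_server": serverResource,
+//        },
+//    }
+//    types := m.Resources() // [{Name: "example_server"}]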
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/resource.go b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go
new file mode 100644
index 00000000..0d9c831a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go
@@ -0,0 +1,49 @@
+package resource
+
+import (
+ "github.com/hashicorp/terraform/helper/config"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+type Resource struct {
+ ConfigValidator *config.Validator
+ Create CreateFunc
+ Destroy DestroyFunc
+ Diff DiffFunc
+ Refresh RefreshFunc
+ Update UpdateFunc
+}
+
+// CreateFunc is a function that creates a resource that didn't previously
+// exist.
+type CreateFunc func(
+ *terraform.InstanceState,
+ *terraform.InstanceDiff,
+ interface{}) (*terraform.InstanceState, error)
+
+// DestroyFunc is a function that destroys a resource that previously
+// exists using the state.
+type DestroyFunc func(
+ *terraform.InstanceState,
+ interface{}) error
+
+// DiffFunc is a function that performs a diff of a resource.
+type DiffFunc func(
+ *terraform.InstanceState,
+ *terraform.ResourceConfig,
+ interface{}) (*terraform.InstanceDiff, error)
+
+// RefreshFunc is a function that performs a refresh of a specific type
+// of resource.
+type RefreshFunc func(
+ *terraform.InstanceState,
+ interface{}) (*terraform.InstanceState, error)
+
+// UpdateFunc is a function that is called to update a resource that
+// previously existed. The difference between this and CreateFunc is that
+// the diff is guaranteed to only contain attributes that don't require
+// a new resource.
+type UpdateFunc func(
+ *terraform.InstanceState,
+ *terraform.InstanceDiff,
+ interface{}) (*terraform.InstanceState, error)
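+
+// A minimal sketch of a Resource literal (the bodies are placeholders; a
+// real resource would also set Diff and Refresh, plus Update where the
+// resource supports in-place changes):
+//
+//    serverResource := Resource{
+//        Create: func(s *terraform.InstanceState, d *terraform.InstanceDiff, meta interface{}) (*terraform.InstanceState, error) {
+//            s.ID = "srv-123" // hypothetical ID returned by an API call
+//            return s, nil
+//        },
+//        Destroy: func(s *terraform.InstanceState, meta interface{}) error {
+//            return nil
+//        },
+//    }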
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
new file mode 100644
index 00000000..37c586a1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
@@ -0,0 +1,259 @@
+package resource
+
+import (
+ "log"
+ "time"
+)
+
+var refreshGracePeriod = 30 * time.Second
+
+// StateRefreshFunc is a function type used for StateChangeConf that is
+// responsible for refreshing the item being watched for a state change.
+//
+// It returns three results. `result` is any object that will be returned
+// as the final object after waiting for state change. This allows you to
+// return the final updated object, for example an EC2 instance after refreshing
+// it.
+//
+// `state` is the latest state of that object. And `err` is any error that
+// may have happened while refreshing the state.
+type StateRefreshFunc func() (result interface{}, state string, err error)
+
+// StateChangeConf is the configuration struct used for `WaitForState`.
+type StateChangeConf struct {
+ Delay time.Duration // Wait this time before starting checks
+ Pending []string // States that are "allowed" and will continue trying
+ Refresh StateRefreshFunc // Refreshes the current state
+ Target []string // Target state
+ Timeout time.Duration // The amount of time to wait before timeout
+ MinTimeout time.Duration // Smallest time to wait before refreshes
+ PollInterval time.Duration // Override MinTimeout/backoff and only poll this often
+ NotFoundChecks int // Number of times to allow not found
+
+ // This is to work around inconsistent APIs
+ ContinuousTargetOccurence int // Number of times the Target state has to occur continuously
+}
+
+// WaitForState watches an object and waits for it to achieve the state
+// specified in the configuration using the specified Refresh() func,
+// waiting the number of seconds specified in the timeout configuration.
+//
+// If the Refresh function returns an error, exit immediately with that error.
+//
+// If the Refresh function returns a state other than the Target state or one
+// listed in Pending, return immediately with an error.
+//
+// If the Timeout is exceeded before reaching the Target state, return an
+// error.
+//
+// Otherwise, return the result of the first call to the Refresh function that
+// reaches the target state.
+func (conf *StateChangeConf) WaitForState() (interface{}, error) {
+ log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target)
+
+ notfoundTick := 0
+ targetOccurence := 0
+
+ // Set a default for times to check for not found
+ if conf.NotFoundChecks == 0 {
+ conf.NotFoundChecks = 20
+ }
+
+ if conf.ContinuousTargetOccurence == 0 {
+ conf.ContinuousTargetOccurence = 1
+ }
+
+ type Result struct {
+ Result interface{}
+ State string
+ Error error
+ Done bool
+ }
+
+ // Read every result from the refresh loop, waiting for a positive result.Done.
+ resCh := make(chan Result, 1)
+ // cancellation channel for the refresh loop
+ cancelCh := make(chan struct{})
+
+ result := Result{}
+
+ go func() {
+ defer close(resCh)
+
+ time.Sleep(conf.Delay)
+
+ // start with 0 delay for the first loop
+ var wait time.Duration
+
+ for {
+ // store the last result
+ resCh <- result
+
+ // wait and watch for cancellation
+ select {
+ case <-cancelCh:
+ return
+ case <-time.After(wait):
+ // first round had no wait
+ if wait == 0 {
+ wait = 100 * time.Millisecond
+ }
+ }
+
+ res, currentState, err := conf.Refresh()
+ result = Result{
+ Result: res,
+ State: currentState,
+ Error: err,
+ }
+
+ if err != nil {
+ resCh <- result
+ return
+ }
+
+ // If we're waiting for the absence of a thing, then return
+ if res == nil && len(conf.Target) == 0 {
+ targetOccurence++
+ if conf.ContinuousTargetOccurence == targetOccurence {
+ result.Done = true
+ resCh <- result
+ return
+ }
+ continue
+ }
+
+ if res == nil {
+ // If we didn't find the resource, check if we have been
+ // failing to find it for a while, and if so, report an error.
+ notfoundTick++
+ if notfoundTick > conf.NotFoundChecks {
+ result.Error = &NotFoundError{
+ LastError: err,
+ Retries: notfoundTick,
+ }
+ resCh <- result
+ return
+ }
+ } else {
+ // Reset the counter for when a resource isn't found
+ notfoundTick = 0
+ found := false
+
+ for _, allowed := range conf.Target {
+ if currentState == allowed {
+ found = true
+ targetOccurence++
+ if conf.ContinuousTargetOccurence == targetOccurence {
+ result.Done = true
+ resCh <- result
+ return
+ }
+ continue
+ }
+ }
+
+ for _, allowed := range conf.Pending {
+ if currentState == allowed {
+ found = true
+ targetOccurence = 0
+ break
+ }
+ }
+
+ if !found && len(conf.Pending) > 0 {
+ result.Error = &UnexpectedStateError{
+ LastError: err,
+ State: result.State,
+ ExpectedState: conf.Target,
+ }
+ resCh <- result
+ return
+ }
+ }
+
+ // Wait between refreshes using exponential backoff, except when
+ // waiting for the target state to reoccur.
+ if targetOccurence == 0 {
+ wait *= 2
+ }
+
+ // If a poll interval has been specified, choose that interval.
+ // Otherwise bound the default value.
+ if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second {
+ wait = conf.PollInterval
+ } else {
+ if wait < conf.MinTimeout {
+ wait = conf.MinTimeout
+ } else if wait > 10*time.Second {
+ wait = 10 * time.Second
+ }
+ }
+
+ log.Printf("[TRACE] Waiting %s before next try", wait)
+ }
+ }()
+
+ // store the last result value from the refresh loop
+ lastResult := Result{}
+
+ timeout := time.After(conf.Timeout)
+ for {
+ select {
+ case r, ok := <-resCh:
+ // channel closed, so return the last result
+ if !ok {
+ return lastResult.Result, lastResult.Error
+ }
+
+ // we reached the intended state
+ if r.Done {
+ return r.Result, r.Error
+ }
+
+ // still waiting, store the last result
+ lastResult = r
+
+ case <-timeout:
+ log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout)
+ log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod)
+
+ // cancel the goroutine and start our grace period timer
+ close(cancelCh)
+ timeout := time.After(refreshGracePeriod)
+
+ // we need a for loop and a label to break on, because we may have
+ // an extra response value to read, but still want to wait for the
+ // channel to close.
+ forSelect:
+ for {
+ select {
+ case r, ok := <-resCh:
+ if r.Done {
+ // the last refresh loop reached the desired state
+ return r.Result, r.Error
+ }
+
+ if !ok {
+ // the goroutine returned
+ break forSelect
+ }
+
+ // target state not reached, save the result for the
+ // TimeoutError and wait for the channel to close
+ lastResult = r
+ case <-timeout:
+ log.Println("[ERROR] WaitForState exceeded refresh grace period")
+ break forSelect
+ }
+ }
+
+ return nil, &TimeoutError{
+ LastError: lastResult.Error,
+ LastState: lastResult.State,
+ Timeout: conf.Timeout,
+ ExpectedState: conf.Target,
+ }
+ }
+ }
+}
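+
+// A minimal usage sketch (the api client and status values are
+// hypothetical):
+//
+//    conf := &StateChangeConf{
+//        Pending:    []string{"pending", "rebooting"},
+//        Target:     []string{"running"},
+//        Timeout:    10 * time.Minute,
+//        MinTimeout: 3 * time.Second,
+//        Refresh: func() (interface{}, string, error) {
+//            srv, err := api.GetServer(id) // hypothetical API call
+//            if err != nil {
+//                return nil, "", err
+//            }
+//            return srv, srv.Status, nil
+//        },
+//    }
+//
+//    raw, err := conf.WaitForState()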
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
new file mode 100644
index 00000000..04367c53
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
@@ -0,0 +1,790 @@
+package resource
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strings"
+ "testing"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/hashicorp/go-getter"
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/helper/logging"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+const TestEnvVar = "TF_ACC"
+
+// TestProvider can be implemented by any ResourceProvider to provide custom
+// reset functionality at the start of an acceptance test.
+// The helper/schema Provider implements this interface.
+type TestProvider interface {
+ TestReset() error
+}
+
+// TestCheckFunc is the callback type used with acceptance tests to check
+// the state of a resource. The state passed in is the latest state known,
+// or in the case of being after a destroy, it is the last known state when
+// it was created.
+type TestCheckFunc func(*terraform.State) error
+
+// ImportStateCheckFunc is the check function for ImportState tests
+type ImportStateCheckFunc func([]*terraform.InstanceState) error
+
+// TestCase is a single acceptance test case used to test the apply/destroy
+// lifecycle of a resource in a specific configuration.
+//
+// When the destroy plan is executed, the config from the last TestStep
+// is used to plan it.
+type TestCase struct {
+ // IsUnitTest allows a test to run regardless of the TF_ACC
+ // environment variable. This should be used with care - only for
+ // fast tests on local resources (e.g. remote state with a local
+ // backend) but can be used to increase confidence in correct
+ // operation of Terraform without waiting for a full acctest run.
+ IsUnitTest bool
+
+ // PreCheck, if non-nil, will be called before any test steps are
+ // executed. It will only be executed in the case that the steps
+ // would run, so it can be used for some validation before running
+ // acceptance tests, such as verifying that keys are set up.
+ PreCheck func()
+
+ // Providers is the ResourceProvider that will be under test.
+ //
+ // Alternately, ProviderFactories can be specified for the providers
+ // that are valid. This takes priority over Providers.
+ //
+ // The end effect of each is the same: specifying the providers that
+ // are used within the tests.
+ Providers map[string]terraform.ResourceProvider
+ ProviderFactories map[string]terraform.ResourceProviderFactory
+
+ // PreventPostDestroyRefresh can be set to true for cases where data sources
+ // are tested alongside real resources
+ PreventPostDestroyRefresh bool
+
+ // CheckDestroy is called after the resource is finally destroyed
+ // to allow the tester to test that the resource is truly gone.
+ CheckDestroy TestCheckFunc
+
+ // Steps are the apply sequences done within the context of the
+ // same state. Each step can have its own check to verify correctness.
+ Steps []TestStep
+
+ // The settings below control the "ID-only refresh test." This is
+ // an enabled-by-default test that verifies that a resource can be
+ // refreshed with only an ID and still produce the same attributes.
+ // This validates completeness of Refresh.
+ //
+ // IDRefreshName is the name of the resource to check. This will
+ // default to the first non-nil primary resource in the state.
+ //
+ // IDRefreshIgnore is a list of configuration keys that will be ignored.
+ IDRefreshName string
+ IDRefreshIgnore []string
+}
+
+// TestStep is a single apply sequence of a test, done within the
+// context of a state.
+//
+// Multiple TestSteps can be sequenced in a Test to allow testing
+// potentially complex update logic. In general, simply create/destroy
+// tests will only need one step.
+type TestStep struct {
+ // ResourceName should be set to the name of the resource
+ // that is being tested. Example: "aws_instance.foo". Various test
+ // modes use this to auto-detect state information.
+ //
+ // This is only required if the test mode settings below say it is
+ // for the mode you're using.
+ ResourceName string
+
+ // PreConfig is called before the Config is applied to perform any per-step
+ // setup that needs to happen. This is called regardless of "test mode"
+ // below.
+ PreConfig func()
+
+ //---------------------------------------------------------------
+ // Test modes. One of the following groups of settings must be
+ // set to determine what the test step will do. Ideally we would've
+ // used Go interfaces here but there are now hundreds of tests we don't
+ // want to re-type so instead we just determine which step logic
+ // to run based on what settings below are set.
+ //---------------------------------------------------------------
+
+ //---------------------------------------------------------------
+ // Plan, Apply testing
+ //---------------------------------------------------------------
+
+ // Config a string of the configuration to give to Terraform. If this
+ // is set, then the TestCase will execute this step with the same logic
+ // as a `terraform apply`.
+ Config string
+
+ // Check is called after the Config is applied. Use this step to
+ // make your own API calls to check the status of things, and to
+ // inspect the format of the ResourceState itself.
+ //
+ // If an error is returned, the test will fail. In this case, a
+ // destroy plan will still be attempted.
+ //
+ // If this is nil, no check is done on this step.
+ Check TestCheckFunc
+
+ // Destroy will create a destroy plan if set to true.
+ Destroy bool
+
+ // ExpectNonEmptyPlan can be set to true for specific types of tests that are
+ // looking to verify that a diff occurs
+ ExpectNonEmptyPlan bool
+
+ // ExpectError allows the construction of test cases that we expect to fail
+ // with an error. The specified regexp must match against the error for the
+ // test to pass.
+ ExpectError *regexp.Regexp
+
+ // PlanOnly can be set to only run `plan` with this configuration, and not
+ // actually apply it. This is useful for ensuring config changes result in
+ // no-op plans
+ PlanOnly bool
+
+ // PreventPostDestroyRefresh can be set to true for cases where data sources
+ // are tested alongside real resources
+ PreventPostDestroyRefresh bool
+
+ //---------------------------------------------------------------
+ // ImportState testing
+ //---------------------------------------------------------------
+
+ // ImportState, if true, will test the functionality of ImportState
+ // by importing the resource with ResourceName (must be set) and the
+ // ID of that resource.
+ ImportState bool
+
+ // ImportStateId is the ID to perform an ImportState operation with.
+ // This is optional. If it isn't set, then the resource ID is automatically
+ // determined by inspecting the state for ResourceName's ID.
+ ImportStateId string
+
+ // ImportStateIdPrefix is the prefix added in front of ImportStateId.
+ // This can be useful in complex import cases, where more than one
+ // attribute needs to be passed on as the Import ID. Mainly in cases
+ // where the ID is not known, and a known prefix needs to be added to
+ // the unset ImportStateId field.
+ ImportStateIdPrefix string
+
+ // ImportStateCheck checks the results of ImportState. It should be
+ // used to verify that the resulting value of ImportState has the
+ // proper resources, IDs, and attributes.
+ ImportStateCheck ImportStateCheckFunc
+
+ // ImportStateVerify, if true, will also check that the state values
+ // that are finally put into the state after import match for all the
+ // IDs returned by the Import.
+ //
+ // ImportStateVerifyIgnore are fields that should not be verified to
+ // be equal. These can be set to ephemeral fields or fields that can't
+ // be refreshed and don't matter.
+ ImportStateVerify bool
+ ImportStateVerifyIgnore []string
+}
+
+// Test performs an acceptance test on a resource.
+//
+// Tests are not run unless an environment variable "TF_ACC" is
+// set to some non-empty value. This is to avoid test cases surprising
+// a user by creating real resources.
+//
+// Tests will fail unless the verbose flag (`go test -v`, or explicitly
+// the "-test.v" flag) is set. Because some acceptance tests take quite
+// long, we require the verbose flag so users are able to see progress
+// output.
+func Test(t TestT, c TestCase) {
+ // We only run acceptance tests if an env var is set because they're
+ // slow and generally require some outside configuration. You can opt out
+ // of this with IsUnitTest on individual TestCases.
+ if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest {
+ t.Skip(fmt.Sprintf(
+ "Acceptance tests skipped unless env '%s' set",
+ TestEnvVar))
+ return
+ }
+
+ logWriter, err := logging.LogOutput()
+ if err != nil {
+ t.Error(fmt.Errorf("error setting up logging: %s", err))
+ }
+ log.SetOutput(logWriter)
+
+ // We require verbose mode so that the user knows what is going on.
+ if !testTesting && !testing.Verbose() && !c.IsUnitTest {
+ t.Fatal("Acceptance tests must be run with the -v flag on tests")
+ return
+ }
+
+ // Run the PreCheck if we have it
+ if c.PreCheck != nil {
+ c.PreCheck()
+ }
+
+ ctxProviders, err := testProviderFactories(c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ opts := terraform.ContextOpts{Providers: ctxProviders}
+
+ // A single state variable to track the lifecycle, starting with no state
+ var state *terraform.State
+
+ // Go through each step and run it
+ var idRefreshCheck *terraform.ResourceState
+ idRefresh := c.IDRefreshName != ""
+ errored := false
+ for i, step := range c.Steps {
+ var err error
+ log.Printf("[WARN] Test: Executing step %d", i)
+
+ // Determine the test mode to execute
+ if step.Config != "" {
+ state, err = testStepConfig(opts, state, step)
+ } else if step.ImportState {
+ state, err = testStepImportState(opts, state, step)
+ } else {
+ err = fmt.Errorf(
+ "unknown test mode for step. Please see TestStep docs\n\n%#v",
+ step)
+ }
+
+ // If there was an error, exit
+ if err != nil {
+ // Perhaps we expected an error? Check if it matches
+ if step.ExpectError != nil {
+ if !step.ExpectError.MatchString(err.Error()) {
+ errored = true
+ t.Error(fmt.Sprintf(
+ "Step %d, expected error:\n\n%s\n\nTo match:\n\n%s\n\n",
+ i, err, step.ExpectError))
+ break
+ }
+ } else {
+ errored = true
+ t.Error(fmt.Sprintf(
+ "Step %d error: %s", i, err))
+ break
+ }
+ }
+
+ // If we've never checked an id-only refresh and our state isn't
+ // empty, find the first resource and test it.
+ if idRefresh && idRefreshCheck == nil && !state.Empty() {
+ // Find the first non-nil resource in the state
+ for _, m := range state.Modules {
+ if len(m.Resources) > 0 {
+ if v, ok := m.Resources[c.IDRefreshName]; ok {
+ idRefreshCheck = v
+ }
+
+ break
+ }
+ }
+
+ // If we have an instance to check for refreshes, do it
+ // immediately. We do it in the middle of another test
+ // because it shouldn't affect the overall state (refresh
+ // is read-only semantically) and we want to fail early if
+ // this fails. If refresh isn't read-only, then this will have
+ // caught a different bug.
+ if idRefreshCheck != nil {
+ log.Printf(
+ "[WARN] Test: Running ID-only refresh check on %s",
+ idRefreshCheck.Primary.ID)
+ if err := testIDOnlyRefresh(c, opts, step, idRefreshCheck); err != nil {
+ log.Printf("[ERROR] Test: ID-only test failed: %s", err)
+ t.Error(fmt.Sprintf(
+ "[ERROR] Test: ID-only test failed: %s", err))
+ break
+ }
+ }
+ }
+ }
+
+ // If we never checked an id-only refresh, it is a failure.
+ if idRefresh {
+ if !errored && len(c.Steps) > 0 && idRefreshCheck == nil {
+ t.Error("ID-only refresh check never ran.")
+ }
+ }
+
+ // If we have a state, then run the destroy
+ if state != nil {
+ lastStep := c.Steps[len(c.Steps)-1]
+ destroyStep := TestStep{
+ Config: lastStep.Config,
+ Check: c.CheckDestroy,
+ Destroy: true,
+ PreventPostDestroyRefresh: c.PreventPostDestroyRefresh,
+ }
+
+ log.Printf("[WARN] Test: Executing destroy step")
+ state, err := testStep(opts, state, destroyStep)
+ if err != nil {
+ t.Error(fmt.Sprintf(
+ "Error destroying resource! WARNING: Dangling resources\n"+
+ "may exist. The full state and error is shown below.\n\n"+
+ "Error: %s\n\nState: %s",
+ err,
+ state))
+ }
+ } else {
+ log.Printf("[WARN] Skipping destroy test since there is no state.")
+ }
+}
+
+// testProviderFactories is a helper to build the ResourceProviderFactory map
+// with pre-instantiated ResourceProviders, so that we can reset them for the
+// test, while only calling the factory function once.
+// Any errors are stored so that they can be returned by the factory in
+// terraform to match non-test behavior.
+func testProviderFactories(c TestCase) (map[string]terraform.ResourceProviderFactory, error) {
+ ctxProviders := c.ProviderFactories
+ if ctxProviders == nil {
+ ctxProviders = make(map[string]terraform.ResourceProviderFactory)
+ }
+ // add any fixed providers
+ for k, p := range c.Providers {
+ ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p)
+ }
+
+ // reset the providers if needed
+ for k, pf := range ctxProviders {
+ // instantiate each provider so it can be reset; if the factory
+ // fails, return the error right away
+ p, err := pf()
+ if err != nil {
+ return nil, err
+ }
+ if p, ok := p.(TestProvider); ok {
+ err := p.TestReset()
+ if err != nil {
+ return nil, fmt.Errorf("[ERROR] failed to reset provider %q: %s", k, err)
+ }
+ }
+ }
+
+ return ctxProviders, nil
+}
+
+// UnitTest is a helper to force the acceptance testing harness to run in the
+// normal unit test suite. This should only be used for resources that don't
+// have any external dependencies.
+func UnitTest(t TestT, c TestCase) {
+ c.IsUnitTest = true
+ Test(t, c)
+}
+
+func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r *terraform.ResourceState) error {
+ // TODO: We guard by this right now so master doesn't explode. We
+ // need to remove this eventually to make this part of the normal tests.
+ if os.Getenv("TF_ACC_IDONLY") == "" {
+ return nil
+ }
+
+ name := fmt.Sprintf("%s.foo", r.Type)
+
+ // Build the state. The state is just the resource with an ID. There
+ // are no attributes. We only set what is needed to perform a refresh.
+ state := terraform.NewState()
+ state.RootModule().Resources[name] = &terraform.ResourceState{
+ Type: r.Type,
+ Primary: &terraform.InstanceState{
+ ID: r.Primary.ID,
+ },
+ }
+
+ // Create the config module. We use the full config because Refresh
+ // doesn't have access to it and we may need things like provider
+ // configurations. The initial implementation of id-only checks used
+ // an empty config module, but that caused the aforementioned problems.
+ mod, err := testModule(opts, step)
+ if err != nil {
+ return err
+ }
+
+ // Initialize the context
+ opts.Module = mod
+ opts.State = state
+ ctx, err := terraform.NewContext(&opts)
+ if err != nil {
+ return err
+ }
+ if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 {
+ if len(es) > 0 {
+ estrs := make([]string, len(es))
+ for i, e := range es {
+ estrs[i] = e.Error()
+ }
+ return fmt.Errorf(
+ "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v",
+ ws, estrs)
+ }
+
+ log.Printf("[WARN] Config warnings: %#v", ws)
+ }
+
+ // Refresh!
+ state, err = ctx.Refresh()
+ if err != nil {
+ return fmt.Errorf("Error refreshing: %s", err)
+ }
+
+ // Verify attribute equivalence.
+ actualR := state.RootModule().Resources[name]
+ if actualR == nil {
+ return fmt.Errorf("Resource gone!")
+ }
+ if actualR.Primary == nil {
+ return fmt.Errorf("Resource has no primary instance")
+ }
+ actual := actualR.Primary.Attributes
+ expected := r.Primary.Attributes
+ // Remove fields we're ignoring
+ for _, v := range c.IDRefreshIgnore {
+ for k := range actual {
+ if strings.HasPrefix(k, v) {
+ delete(actual, k)
+ }
+ }
+ for k := range expected {
+ if strings.HasPrefix(k, v) {
+ delete(expected, k)
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ // Determine only the different attributes
+ for k, v := range expected {
+ if av, ok := actual[k]; ok && v == av {
+ delete(expected, k)
+ delete(actual, k)
+ }
+ }
+
+ spewConf := spew.NewDefaultConfig()
+ spewConf.SortKeys = true
+ return fmt.Errorf(
+ "Attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+
+ "\n\n%s\n\n%s",
+ spewConf.Sdump(actual), spewConf.Sdump(expected))
+ }
+
+ return nil
+}
+
+func testModule(
+ opts terraform.ContextOpts,
+ step TestStep) (*module.Tree, error) {
+ if step.PreConfig != nil {
+ step.PreConfig()
+ }
+
+ cfgPath, err := ioutil.TempDir("", "tf-test")
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error creating temporary directory for config: %s", err)
+ }
+ defer os.RemoveAll(cfgPath)
+
+ // Write the configuration
+ cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf"))
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error creating temporary file for config: %s", err)
+ }
+
+ _, err = io.Copy(cfgF, strings.NewReader(step.Config))
+ cfgF.Close()
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error creating temporary file for config: %s", err)
+ }
+
+ // Parse the configuration
+ mod, err := module.NewTreeModule("", cfgPath)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error loading configuration: %s", err)
+ }
+
+ // Load the modules
+ modStorage := &getter.FolderStorage{
+ StorageDir: filepath.Join(cfgPath, ".tfmodules"),
+ }
+ err = mod.Load(modStorage, module.GetModeGet)
+ if err != nil {
+ return nil, fmt.Errorf("Error downloading modules: %s", err)
+ }
+
+ return mod, nil
+}
+
+func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) {
+ if c.ResourceName == "" {
+ return nil, fmt.Errorf("ResourceName must be set in TestStep")
+ }
+
+ for _, m := range state.Modules {
+ if len(m.Resources) > 0 {
+ if v, ok := m.Resources[c.ResourceName]; ok {
+ return v, nil
+ }
+ }
+ }
+
+ return nil, fmt.Errorf(
+ "Resource specified by ResourceName couldn't be found: %s", c.ResourceName)
+}
+
+// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into
+// a single TestCheckFunc.
+//
+// As a user testing their provider, this lets you decompose your checks
+// into smaller pieces more easily.
+func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
+ return func(s *terraform.State) error {
+ for i, f := range fs {
+ if err := f(s); err != nil {
+ return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)
+ }
+ }
+
+ return nil
+ }
+}
+
+// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into
+// a single TestCheckFunc.
+//
+// As a user testing their provider, this lets you decompose your checks
+// into smaller pieces more easily.
+//
+// Unlike ComposeTestCheckFunc, ComposeAggregateTestCheckFunc runs _all_ of the
+// TestCheckFuncs and aggregates failures.
+func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
+ return func(s *terraform.State) error {
+ var result *multierror.Error
+
+ for i, f := range fs {
+ if err := f(s); err != nil {
+ result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err))
+ }
+ }
+
+ return result.ErrorOrNil()
+ }
+}
+
+// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value
+// exists in state for the given name/key combination. It is useful when
+// testing that computed values were set, when it is not possible to
+// know ahead of time what the values will be.
+func TestCheckResourceAttrSet(name, key string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ is, err := primaryInstanceState(s, name)
+ if err != nil {
+ return err
+ }
+
+ if val, ok := is.Attributes[key]; ok && val != "" {
+ return nil
+ }
+
+ return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key)
+ }
+}
+
+// TestCheckResourceAttr is a TestCheckFunc which validates
+// the value in state for the given name/key combination.
+func TestCheckResourceAttr(name, key, value string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ is, err := primaryInstanceState(s, name)
+ if err != nil {
+ return err
+ }
+
+ if v, ok := is.Attributes[key]; !ok || v != value {
+ if !ok {
+ return fmt.Errorf("%s: Attribute '%s' not found", name, key)
+ }
+
+ return fmt.Errorf(
+ "%s: Attribute '%s' expected %#v, got %#v",
+ name,
+ key,
+ value,
+ v)
+ }
+
+ return nil
+ }
+}
+
+// TestCheckNoResourceAttr is a TestCheckFunc which ensures that
+// NO value exists in state for the given name/key combination.
+func TestCheckNoResourceAttr(name, key string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ is, err := primaryInstanceState(s, name)
+ if err != nil {
+ return err
+ }
+
+ if _, ok := is.Attributes[key]; ok {
+ return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key)
+ }
+
+ return nil
+ }
+}
+
+// TestMatchResourceAttr is a TestCheckFunc which checks that the value
+// in state for the given name/key combination matches the given regex.
+func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc {
+ return func(s *terraform.State) error {
+ is, err := primaryInstanceState(s, name)
+ if err != nil {
+ return err
+ }
+
+ if !r.MatchString(is.Attributes[key]) {
+ return fmt.Errorf(
+ "%s: Attribute '%s' didn't match %q, got %#v",
+ name,
+ key,
+ r.String(),
+ is.Attributes[key])
+ }
+
+ return nil
+ }
+}
+
+// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the
+// value is a pointer so that it can be updated while the test is running.
+// It will only be dereferenced at the point this step is run.
+func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ return TestCheckResourceAttr(name, key, *value)(s)
+ }
+}
+
+// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values
+// in state for a pair of name/key combinations are equal.
+func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ isFirst, err := primaryInstanceState(s, nameFirst)
+ if err != nil {
+ return err
+ }
+ vFirst, ok := isFirst.Attributes[keyFirst]
+ if !ok {
+ return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst)
+ }
+
+ isSecond, err := primaryInstanceState(s, nameSecond)
+ if err != nil {
+ return err
+ }
+ vSecond, ok := isSecond.Attributes[keySecond]
+ if !ok {
+ return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond)
+ }
+
+ if vFirst != vSecond {
+ return fmt.Errorf(
+ "%s: Attribute '%s' expected %#v, got %#v",
+ nameFirst,
+ keyFirst,
+ vSecond,
+ vFirst)
+ }
+
+ return nil
+ }
+}
+
+// TestCheckOutput is a TestCheckFunc which validates the value of a
+// named output in state.
+func TestCheckOutput(name, value string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ ms := s.RootModule()
+ rs, ok := ms.Outputs[name]
+ if !ok {
+ return fmt.Errorf("Not found: %s", name)
+ }
+
+ if rs.Value != value {
+ return fmt.Errorf(
+ "Output '%s': expected %#v, got %#v",
+ name,
+ value,
+ rs)
+ }
+
+ return nil
+ }
+}
+
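+// TestMatchOutput is a TestCheckFunc which checks that the value of an
+// output in state matches the given regex.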
+func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc {
+ return func(s *terraform.State) error {
+ ms := s.RootModule()
+ rs, ok := ms.Outputs[name]
+ if !ok {
+ return fmt.Errorf("Not found: %s", name)
+ }
+
+ if !r.MatchString(rs.Value.(string)) {
+ return fmt.Errorf(
+ "Output '%s': %#v didn't match %q",
+ name,
+ rs,
+ r.String())
+ }
+
+ return nil
+ }
+}
+
+// TestT is the interface used to handle the test lifecycle of a test.
+//
+// Users should just use a *testing.T object, which implements this.
+type TestT interface {
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Skip(args ...interface{})
+}
+
+// This is set to true by unit tests to alter some behavior
+var testTesting = false
+
+// primaryInstanceState returns the primary instance state for the given resource name.
+func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) {
+ ms := s.RootModule()
+ rs, ok := ms.Resources[name]
+ if !ok {
+ return nil, fmt.Errorf("Not found: %s", name)
+ }
+
+ is := rs.Primary
+ if is == nil {
+ return nil, fmt.Errorf("No primary instance: %s", name)
+ }
+
+ return is, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
new file mode 100644
index 00000000..537a11c3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
@@ -0,0 +1,160 @@
+package resource
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// testStepConfig runs a config-mode test step
+func testStepConfig(
+ opts terraform.ContextOpts,
+ state *terraform.State,
+ step TestStep) (*terraform.State, error) {
+ return testStep(opts, state, step)
+}
+
+func testStep(
+ opts terraform.ContextOpts,
+ state *terraform.State,
+ step TestStep) (*terraform.State, error) {
+ mod, err := testModule(opts, step)
+ if err != nil {
+ return state, err
+ }
+
+ // Build the context
+ opts.Module = mod
+ opts.State = state
+ opts.Destroy = step.Destroy
+ ctx, err := terraform.NewContext(&opts)
+ if err != nil {
+ return state, fmt.Errorf("Error initializing context: %s", err)
+ }
+ if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 {
+ if len(es) > 0 {
+ estrs := make([]string, len(es))
+ for i, e := range es {
+ estrs[i] = e.Error()
+ }
+ return state, fmt.Errorf(
+ "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v",
+ ws, estrs)
+ }
+ log.Printf("[WARN] Config warnings: %#v", ws)
+ }
+
+ // Refresh!
+ state, err = ctx.Refresh()
+ if err != nil {
+ return state, fmt.Errorf(
+ "Error refreshing: %s", err)
+ }
+
+ // If this step is a PlanOnly step, skip over this first Plan and subsequent
+ // Apply, and use the follow-up Plan that checks for perpetual diffs.
+ if !step.PlanOnly {
+ // Plan!
+ if p, err := ctx.Plan(); err != nil {
+ return state, fmt.Errorf(
+ "Error planning: %s", err)
+ } else {
+ log.Printf("[WARN] Test: Step plan: %s", p)
+ }
+
+ // We need to keep a copy of the state prior to destroying
+ // such that destroy steps can verify their behaviour in the check
+ // function
+ stateBeforeApplication := state.DeepCopy()
+
+ // Apply!
+ state, err = ctx.Apply()
+ if err != nil {
+ return state, fmt.Errorf("Error applying: %s", err)
+ }
+
+ // Check! Excitement!
+ if step.Check != nil {
+ if step.Destroy {
+ if err := step.Check(stateBeforeApplication); err != nil {
+ return state, fmt.Errorf("Check failed: %s", err)
+ }
+ } else {
+ if err := step.Check(state); err != nil {
+ return state, fmt.Errorf("Check failed: %s", err)
+ }
+ }
+ }
+ }
+
+ // Now verify that the plan is empty and we don't have a perpetual diff
+ // issue. We do this with TWO plans: one without a refresh (here) and one
+ // after a refresh (below).
+ var p *terraform.Plan
+ if p, err = ctx.Plan(); err != nil {
+ return state, fmt.Errorf("Error on follow-up plan: %s", err)
+ }
+ if p.Diff != nil && !p.Diff.Empty() {
+ if step.ExpectNonEmptyPlan {
+ log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p)
+ } else {
+ return state, fmt.Errorf(
+ "After applying this step, the plan was not empty:\n\n%s", p)
+ }
+ }
+
+ // And another after a Refresh.
+ if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) {
+ state, err = ctx.Refresh()
+ if err != nil {
+ return state, fmt.Errorf(
+ "Error on follow-up refresh: %s", err)
+ }
+ }
+ if p, err = ctx.Plan(); err != nil {
+ return state, fmt.Errorf("Error on second follow-up plan: %s", err)
+ }
+ empty := p.Diff == nil || p.Diff.Empty()
+
+ // Data resources are tricky because they legitimately get instantiated
+ // during refresh so that they will be already populated during the
+ // plan walk. Because of this, if we have any data resources in the
+ // config we'll end up wanting to destroy them again here. This is
+ // acceptable and expected, and we'll treat it as "empty" for the
+ // sake of this testing.
+ if step.Destroy {
+ empty = true
+
+ for _, moduleDiff := range p.Diff.Modules {
+ for k, instanceDiff := range moduleDiff.Resources {
+ if !strings.HasPrefix(k, "data.") {
+ empty = false
+ break
+ }
+
+ if !instanceDiff.Destroy {
+ empty = false
+ }
+ }
+ }
+ }
+
+ if !empty {
+ if step.ExpectNonEmptyPlan {
+ log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p)
+ } else {
+ return state, fmt.Errorf(
+ "After applying this step and refreshing, "+
+ "the plan was not empty:\n\n%s", p)
+ }
+ }
+
+ // Made it here, but expected a non-empty plan, fail!
+ if step.ExpectNonEmptyPlan && (p.Diff == nil || p.Diff.Empty()) {
+ return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!")
+ }
+
+ // Made it here? Good job test step!
+ return state, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
new file mode 100644
index 00000000..28ad1052
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
@@ -0,0 +1,141 @@
+package resource
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "strings"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// testStepImportState runs an import state test step
+func testStepImportState(
+ opts terraform.ContextOpts,
+ state *terraform.State,
+ step TestStep) (*terraform.State, error) {
+ // Determine the ID to import
+ importId := step.ImportStateId
+ if importId == "" {
+ resource, err := testResource(step, state)
+ if err != nil {
+ return state, err
+ }
+
+ importId = resource.Primary.ID
+ }
+ importPrefix := step.ImportStateIdPrefix
+ if importPrefix != "" {
+ importId = fmt.Sprintf("%s%s", importPrefix, importId)
+ }
+
+ // Set up the context. We initialize with an empty state. We use the
+ // full config for provider configurations.
+ mod, err := testModule(opts, step)
+ if err != nil {
+ return state, err
+ }
+
+ opts.Module = mod
+ opts.State = terraform.NewState()
+ ctx, err := terraform.NewContext(&opts)
+ if err != nil {
+ return state, err
+ }
+
+ // Do the import!
+ newState, err := ctx.Import(&terraform.ImportOpts{
+ // Set the module so that any provider config is loaded
+ Module: mod,
+
+ Targets: []*terraform.ImportTarget{
+ &terraform.ImportTarget{
+ Addr: step.ResourceName,
+ ID: importId,
+ },
+ },
+ })
+ if err != nil {
+ log.Printf("[ERROR] Test: ImportState failure: %s", err)
+ return state, err
+ }
+
+ // Go through the new state and verify
+ if step.ImportStateCheck != nil {
+ var states []*terraform.InstanceState
+ for _, r := range newState.RootModule().Resources {
+ if r.Primary != nil {
+ states = append(states, r.Primary)
+ }
+ }
+ if err := step.ImportStateCheck(states); err != nil {
+ return state, err
+ }
+ }
+
+ // Verify that all the states match
+ if step.ImportStateVerify {
+ new := newState.RootModule().Resources
+ old := state.RootModule().Resources
+ for _, r := range new {
+ // Find the existing resource
+ var oldR *terraform.ResourceState
+ for _, r2 := range old {
+ if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type {
+ oldR = r2
+ break
+ }
+ }
+ if oldR == nil {
+ return state, fmt.Errorf(
+ "Failed state verification, resource with ID %s not found",
+ r.Primary.ID)
+ }
+
+ // Compare their attributes
+ actual := make(map[string]string)
+ for k, v := range r.Primary.Attributes {
+ actual[k] = v
+ }
+ expected := make(map[string]string)
+ for k, v := range oldR.Primary.Attributes {
+ expected[k] = v
+ }
+
+ // Remove fields we're ignoring
+ for _, v := range step.ImportStateVerifyIgnore {
+ for k := range actual {
+ if strings.HasPrefix(k, v) {
+ delete(actual, k)
+ }
+ }
+ for k := range expected {
+ if strings.HasPrefix(k, v) {
+ delete(expected, k)
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ // Determine only the different attributes
+ for k, v := range expected {
+ if av, ok := actual[k]; ok && v == av {
+ delete(expected, k)
+ delete(actual, k)
+ }
+ }
+
+ spewConf := spew.NewDefaultConfig()
+ spewConf.SortKeys = true
+ return state, fmt.Errorf(
+ "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+
+ "\n\n%s\n\n%s",
+ spewConf.Sdump(actual), spewConf.Sdump(expected))
+ }
+ }
+ }
+
+ // Return the old state (non-imported) so we don't change anything.
+ return state, nil
+}
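+
+// A hedged sketch of a typical import test step (the resource address is
+// hypothetical, and the ImportState flag is assumed to exist on TestStep
+// alongside the ImportState* fields referenced above):
+//
+//	TestStep{
+//		ResourceName:      "example_widget.foo",
+//		ImportState:       true,
+//		ImportStateVerify: true,
+//	},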
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go
new file mode 100644
index 00000000..ca50e292
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go
@@ -0,0 +1,84 @@
+package resource
+
+import (
+ "sync"
+ "time"
+)
+
+// Retry is a basic wrapper around StateChangeConf that will just retry
+// a function until it no longer returns an error.
+func Retry(timeout time.Duration, f RetryFunc) error {
+ // These are used to pull the error out of the function; need a mutex to
+ // avoid a data race.
+ var resultErr error
+ var resultErrMu sync.Mutex
+
+ c := &StateChangeConf{
+ Pending: []string{"retryableerror"},
+ Target: []string{"success"},
+ Timeout: timeout,
+ MinTimeout: 500 * time.Millisecond,
+ Refresh: func() (interface{}, string, error) {
+ rerr := f()
+
+ resultErrMu.Lock()
+ defer resultErrMu.Unlock()
+
+ if rerr == nil {
+ resultErr = nil
+ return 42, "success", nil
+ }
+
+ resultErr = rerr.Err
+
+ if rerr.Retryable {
+ return 42, "retryableerror", nil
+ }
+ return nil, "quit", rerr.Err
+ },
+ }
+
+ _, waitErr := c.WaitForState()
+
+ // Need to acquire the lock here to be able to avoid race using resultErr as
+ // the return value
+ resultErrMu.Lock()
+ defer resultErrMu.Unlock()
+
+ // resultErr may be nil because the wait timed out and resultErr was never
+ // set; this is still an error
+ if resultErr == nil {
+ return waitErr
+ }
+ // resultErr takes precedence over waitErr if both are set because it is
+ // more likely to be useful
+ return resultErr
+}
+
+// RetryFunc is the function retried until it succeeds.
+type RetryFunc func() *RetryError
+
+// RetryError is the required return type of RetryFunc. It forces client code
+// to choose whether or not a given error is retryable.
+type RetryError struct {
+ Err error
+ Retryable bool
+}
+
+// RetryableError is a helper to create a RetryError that's retryable from a
+// given error.
+func RetryableError(err error) *RetryError {
+ if err == nil {
+ return nil
+ }
+ return &RetryError{Err: err, Retryable: true}
+}
+
+// NonRetryableError is a helper to create a RetryError that's _not_ retryable
+// from a given error.
+func NonRetryableError(err error) *RetryError {
+ if err == nil {
+ return nil
+ }
+ return &RetryError{Err: err, Retryable: false}
+}
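+
+// A hedged usage sketch for Retry; the API call client.GetWidget and the
+// isThrottle check are hypothetical placeholders, not part of this package:
+//
+//	err := Retry(2*time.Minute, func() *RetryError {
+//		resp, err := client.GetWidget(id) // hypothetical API call
+//		if err != nil && isThrottle(err) {
+//			return RetryableError(err) // transient: try again
+//		}
+//		if err != nil {
+//			return NonRetryableError(err) // permanent: give up now
+//		}
+//		_ = resp
+//		return nil // success ends the retry loop
+//	})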
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/README.md b/vendor/github.com/hashicorp/terraform/helper/schema/README.md
new file mode 100644
index 00000000..28c83628
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/README.md
@@ -0,0 +1,11 @@
+# Terraform Helper Lib: schema
+
+The `schema` package provides a high-level interface for writing resource
+providers for Terraform.
+
+If you're writing a resource provider, we recommend you use this package.
+
+The interface exposed by this package is much friendlier than trying to
+write to the Terraform API directly. The core Terraform API is low-level
+and built for maximum flexibility and control, whereas this library is built
+as a framework around that to more easily write common providers.
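+
+For example, a minimal resource built with this package might look like the
+following sketch (the resource name and CRUD functions are placeholders,
+not part of this library):
+
+```go
+func resourceExampleWidget() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceExampleWidgetCreate, // hypothetical CRUD funcs
+		Read:   resourceExampleWidgetRead,
+		Delete: resourceExampleWidgetDelete,
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+		},
+	}
+}
+```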
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
new file mode 100644
index 00000000..a0729c02
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
@@ -0,0 +1,94 @@
+package schema
+
+import (
+ "context"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Backend represents a partial backend.Backend implementation and simplifies
+// the creation of configuration loading and validation.
+//
+// Unlike other schema structs such as Provider, this struct is meant to be
+// embedded within your actual implementation. It provides implementations
+// only for Input and Configure and gives you a method for accessing the
+// configuration in the form of a ResourceData that you're expected to call
+// from the other implementation funcs.
+type Backend struct {
+ // Schema is the schema for the configuration of this backend. If this
+ // Backend has no configuration this can be omitted.
+ Schema map[string]*Schema
+
+ // ConfigureFunc is called to configure the backend. Use the
+ // FromContext* methods to extract information from the context.
+ // This can be nil, in which case nothing will be called but the
+ // config will still be stored.
+ ConfigureFunc func(context.Context) error
+
+ config *ResourceData
+}
+
+var (
+ backendConfigKey = contextKey("backend config")
+)
+
+// FromContextBackendConfig extracts a ResourceData with the configuration
+// from the context. This should only be called by Backend functions.
+func FromContextBackendConfig(ctx context.Context) *ResourceData {
+ return ctx.Value(backendConfigKey).(*ResourceData)
+}
+
+func (b *Backend) Input(
+ input terraform.UIInput,
+ c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+ if b == nil {
+ return c, nil
+ }
+
+ return schemaMap(b.Schema).Input(input, c)
+}
+
+func (b *Backend) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ if b == nil {
+ return nil, nil
+ }
+
+ return schemaMap(b.Schema).Validate(c)
+}
+
+func (b *Backend) Configure(c *terraform.ResourceConfig) error {
+ if b == nil {
+ return nil
+ }
+
+ sm := schemaMap(b.Schema)
+
+ // Get a ResourceData for this configuration. To do this, we actually
+ // generate an intermediary "diff" although that is never exposed.
+ diff, err := sm.Diff(nil, c)
+ if err != nil {
+ return err
+ }
+
+ data, err := sm.Data(nil, diff)
+ if err != nil {
+ return err
+ }
+ b.config = data
+
+ if b.ConfigureFunc != nil {
+ err = b.ConfigureFunc(context.WithValue(
+ context.Background(), backendConfigKey, data))
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Config returns the configuration. This is available after Configure is
+// called.
+func (b *Backend) Config() *ResourceData {
+ return b.config
+}
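+
+// An illustrative sketch of embedding Backend (the surrounding struct and
+// the "path" field are hypothetical, not part of this package):
+//
+//	type myBackend struct {
+//		*schema.Backend
+//	}
+//
+//	func newBackend() *myBackend {
+//		b := &myBackend{}
+//		b.Backend = &schema.Backend{
+//			Schema: map[string]*schema.Schema{
+//				"path": &schema.Schema{Type: schema.TypeString, Required: true},
+//			},
+//			ConfigureFunc: func(ctx context.Context) error {
+//				data := schema.FromContextBackendConfig(ctx)
+//				_ = data.Get("path") // read the validated config here
+//				return nil
+//			},
+//		}
+//		return b
+//	}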
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go
new file mode 100644
index 00000000..5a03d2d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go
@@ -0,0 +1,59 @@
+package schema
+
+import (
+ "fmt"
+)
+
+// DataSourceResourceShim takes a Resource instance describing a data source
+// (with a Read implementation and a Schema, at least) and returns a new
+// Resource instance with additional Create and Delete implementations that
+// allow the data source to be used as a resource.
+//
+// This is a backward-compatibility layer for data sources that were formerly
+// read-only resources before the data source concept was added. It should not
+// be used for any *new* data sources.
+//
+// The Read function for the data source *must* call d.SetId with a non-empty
+// id in order for this shim to function as expected.
+//
+// The provided Resource instance, and its schema, will be modified in-place
+// to make it suitable for use as a full resource.
+func DataSourceResourceShim(name string, dataSource *Resource) *Resource {
+ // Recursively, in-place adjust the schema so that it has ForceNew
+ // on any user-settable resource.
+ dataSourceResourceShimAdjustSchema(dataSource.Schema)
+
+ dataSource.Create = CreateFunc(dataSource.Read)
+ dataSource.Delete = func(d *ResourceData, meta interface{}) error {
+ d.SetId("")
+ return nil
+ }
+ dataSource.Update = nil // should already be nil, but let's make sure
+
+ // FIXME: Link to some further docs either on the website or in the
+ // changelog, once such a thing exists.
+ dataSource.deprecationMessage = fmt.Sprintf(
+ "using %s as a resource is deprecated; consider using the data source instead",
+ name,
+ )
+
+ return dataSource
+}
+
+func dataSourceResourceShimAdjustSchema(schema map[string]*Schema) {
+ for _, s := range schema {
+ // If the attribute is configurable then it must be ForceNew,
+ // since we have no Update implementation.
+ if s.Required || s.Optional {
+ s.ForceNew = true
+ }
+
+ // If the attribute is a nested resource, we need to recursively
+ // apply these same adjustments to it.
+ if s.Elem != nil {
+ if r, ok := s.Elem.(*Resource); ok {
+ dataSourceResourceShimAdjustSchema(r.Schema)
+ }
+ }
+ }
+}
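+
+// A hedged usage sketch (the constructor and resource name are
+// hypothetical): a provider can keep a legacy read-only resource working
+// while steering users toward the data source, e.g. in its ResourcesMap:
+//
+//	"example_widget": DataSourceResourceShim(
+//		"example_widget",
+//		dataSourceExampleWidget(),
+//	),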
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/equal.go b/vendor/github.com/hashicorp/terraform/helper/schema/equal.go
new file mode 100644
index 00000000..d5e20e03
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/equal.go
@@ -0,0 +1,6 @@
+package schema
+
+// Equal is an interface that checks for deep equality between two objects.
+type Equal interface {
+ Equal(interface{}) bool
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
new file mode 100644
index 00000000..1660a670
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
@@ -0,0 +1,334 @@
+package schema
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// FieldReaders are responsible for decoding fields out of data into
+// the proper typed representation. ResourceData uses this to query data
+// out of multiple sources: config, state, diffs, etc.
+type FieldReader interface {
+ ReadField([]string) (FieldReadResult, error)
+}
+
+// FieldReadResult encapsulates all the resulting data from reading
+// a field.
+type FieldReadResult struct {
+ // Value is the actual read value. ValueProcessed is only set by
+ // readers that post-process values (for example, the diff reader
+ // decoding a diff's NewExtra data); in that case it holds the
+ // processed New value while Value holds the decoded raw value.
+ Value interface{}
+ ValueProcessed interface{}
+
+ // Exists is true if the field was found in the data. False means
+ // it wasn't found if there was no error.
+ Exists bool
+
+ // Computed is true if the field was found but the value
+ // is computed.
+ Computed bool
+}
+
+// ValueOrZero returns the value of this result or the zero value of the
+// schema type, ensuring a consistent non-nil return value.
+func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} {
+ if r.Value != nil {
+ return r.Value
+ }
+
+ return s.ZeroValue()
+}
+
+// addrToSchema finds the final element schema for the given address
+// and the given schema. It returns all the schemas that led to the final
+// schema. These are in order of the address (out to in).
+func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema {
+ current := &Schema{
+ Type: typeObject,
+ Elem: schemaMap,
+ }
+
+ // If we aren't given an address, then the user is requesting the
+ // full object, so we return the special value which is the full object.
+ if len(addr) == 0 {
+ return []*Schema{current}
+ }
+
+ result := make([]*Schema, 0, len(addr))
+ for len(addr) > 0 {
+ k := addr[0]
+ addr = addr[1:]
+
+ REPEAT:
+ // We want to trim off the first "typeObject" since it's not a
+ // real lookup that people do; i.e. []string{"foo"} in a structure
+ // isn't {typeObject, typeString}, it's just {typeString}.
+ if len(result) > 0 || current.Type != typeObject {
+ result = append(result, current)
+ }
+
+ switch t := current.Type; t {
+ case TypeBool, TypeInt, TypeFloat, TypeString:
+ if len(addr) > 0 {
+ return nil
+ }
+ case TypeList, TypeSet:
+ isIndex := len(addr) > 0 && addr[0] == "#"
+
+ switch v := current.Elem.(type) {
+ case *Resource:
+ current = &Schema{
+ Type: typeObject,
+ Elem: v.Schema,
+ }
+ case *Schema:
+ current = v
+ case ValueType:
+ current = &Schema{Type: v}
+ default:
+ // we may not know the Elem type and are just looking for the
+ // index
+ if isIndex {
+ break
+ }
+
+ if len(addr) == 0 {
+ // we've processed the address, so return what we've
+ // collected
+ return result
+ }
+
+ if len(addr) == 1 {
+ if _, err := strconv.Atoi(addr[0]); err == nil {
+ // we're indexing a value without a schema. This can
+ // happen if the list is nested in another schema type.
+ // Default to a TypeString like we do with a map
+ current = &Schema{Type: TypeString}
+ break
+ }
+ }
+
+ return nil
+ }
+
+ // If we only have one more thing and the next thing
+ // is a #, then we're accessing the index which is always
+ // an int.
+ if isIndex {
+ current = &Schema{Type: TypeInt}
+ break
+ }
+
+ case TypeMap:
+ if len(addr) > 0 {
+ switch v := current.Elem.(type) {
+ case ValueType:
+ current = &Schema{Type: v}
+ default:
+ // maps default to string values. This is all we can have
+ // if this is nested in another list or map.
+ current = &Schema{Type: TypeString}
+ }
+ }
+ case typeObject:
+ // If we're already in the object, then we want to handle Sets
+ // and Lists specially. Basically, their next key is the lookup
+ // key (the set value or the list element). For these scenarios,
+ // we just want to skip it and move to the next element if there
+ // is one.
+ if len(result) > 0 {
+ lastType := result[len(result)-2].Type
+ if lastType == TypeSet || lastType == TypeList {
+ if len(addr) == 0 {
+ break
+ }
+
+ k = addr[0]
+ addr = addr[1:]
+ }
+ }
+
+ m := current.Elem.(map[string]*Schema)
+ val, ok := m[k]
+ if !ok {
+ return nil
+ }
+
+ current = val
+ goto REPEAT
+ }
+ }
+
+ return result
+}
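+
+// For illustration (with a hypothetical schema map m declaring "ports" as
+// a TypeList of TypeInt), the flattened key "ports.0" is addressed as its
+// dot-separated parts:
+//
+//	schemas := addrToSchema([]string{"ports", "0"}, m)
+//	// schemas[0] is the "ports" list schema and schemas[1] is its
+//	// TypeInt element schema; addressing "ports.#" instead yields a
+//	// synthetic TypeInt schema for the count.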
+
+// readListField is a generic method for reading a list field out of
+// a FieldReader. It does this based on the assumption that there is a key
+// "foo.#" for a list "foo" and that the indexes are "foo.0", "foo.1", etc.
+// after that point.
+func readListField(
+ r FieldReader, addr []string, schema *Schema) (FieldReadResult, error) {
+ addrPadded := make([]string, len(addr)+1)
+ copy(addrPadded, addr)
+ addrPadded[len(addrPadded)-1] = "#"
+
+ // Get the number of elements in the list
+ countResult, err := r.ReadField(addrPadded)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if !countResult.Exists {
+ // No count, means we have no list
+ countResult.Value = 0
+ }
+
+ // If we have an empty list, then return an empty list
+ if countResult.Computed || countResult.Value.(int) == 0 {
+ return FieldReadResult{
+ Value: []interface{}{},
+ Exists: countResult.Exists,
+ Computed: countResult.Computed,
+ }, nil
+ }
+
+ // Go through each count, and get the item value out of it
+ result := make([]interface{}, countResult.Value.(int))
+ for i := range result {
+ is := strconv.FormatInt(int64(i), 10)
+ addrPadded[len(addrPadded)-1] = is
+ rawResult, err := r.ReadField(addrPadded)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if !rawResult.Exists {
+ // This should never happen, because by the time the data
+ // gets to the FieldReaders, all the defaults should be set by
+ // Schema.
+ rawResult.Value = nil
+ }
+
+ result[i] = rawResult.Value
+ }
+
+ return FieldReadResult{
+ Value: result,
+ Exists: true,
+ }, nil
+}
+
+// readObjectField is a generic method for reading objects out of FieldReaders
+// based on the assumption that building an address of []string{k, FIELD}
+// will result in the proper field data.
+func readObjectField(
+ r FieldReader,
+ addr []string,
+ schema map[string]*Schema) (FieldReadResult, error) {
+ result := make(map[string]interface{})
+ exists := false
+ for field, s := range schema {
+ addrRead := make([]string, len(addr), len(addr)+1)
+ copy(addrRead, addr)
+ addrRead = append(addrRead, field)
+ rawResult, err := r.ReadField(addrRead)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if rawResult.Exists {
+ exists = true
+ }
+
+ result[field] = rawResult.ValueOrZero(s)
+ }
+
+ return FieldReadResult{
+ Value: result,
+ Exists: exists,
+ }, nil
+}
+
+// convert map values to the proper primitive type based on schema.Elem
+func mapValuesToPrimitive(m map[string]interface{}, schema *Schema) error {
+
+ elemType := TypeString
+ if et, ok := schema.Elem.(ValueType); ok {
+ elemType = et
+ }
+
+ switch elemType {
+ case TypeInt, TypeFloat, TypeBool:
+ for k, v := range m {
+ vs, ok := v.(string)
+ if !ok {
+ continue
+ }
+
+ v, err := stringToPrimitive(vs, false, &Schema{Type: elemType})
+ if err != nil {
+ return err
+ }
+
+ m[k] = v
+ }
+ }
+ return nil
+}
+
+func stringToPrimitive(
+ value string, computed bool, schema *Schema) (interface{}, error) {
+ var returnVal interface{}
+ switch schema.Type {
+ case TypeBool:
+ if value == "" {
+ returnVal = false
+ break
+ }
+ if computed {
+ break
+ }
+
+ v, err := strconv.ParseBool(value)
+ if err != nil {
+ return nil, err
+ }
+
+ returnVal = v
+ case TypeFloat:
+ if value == "" {
+ returnVal = 0.0
+ break
+ }
+ if computed {
+ break
+ }
+
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ returnVal = v
+ case TypeInt:
+ if value == "" {
+ returnVal = 0
+ break
+ }
+ if computed {
+ break
+ }
+
+ v, err := strconv.ParseInt(value, 0, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ returnVal = int(v)
+ case TypeString:
+ returnVal = value
+ default:
+ panic(fmt.Sprintf("Unknown type: %s", schema.Type))
+ }
+
+ return returnVal, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
new file mode 100644
index 00000000..f958bbcb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
@@ -0,0 +1,333 @@
+package schema
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/mitchellh/mapstructure"
+)
+
+// ConfigFieldReader reads fields out of a *terraform.ResourceConfig to the
+// best of its ability. It also applies defaults from the Schema. (The other
+// field readers do not need default handling because they source fully
+// populated data structures.)
+type ConfigFieldReader struct {
+ Config *terraform.ResourceConfig
+ Schema map[string]*Schema
+
+ indexMaps map[string]map[string]int
+ once sync.Once
+}
+
+func (r *ConfigFieldReader) ReadField(address []string) (FieldReadResult, error) {
+ r.once.Do(func() { r.indexMaps = make(map[string]map[string]int) })
+ return r.readField(address, false)
+}
+
+func (r *ConfigFieldReader) readField(
+ address []string, nested bool) (FieldReadResult, error) {
+ schemaList := addrToSchema(address, r.Schema)
+ if len(schemaList) == 0 {
+ return FieldReadResult{}, nil
+ }
+
+ if !nested {
+ // If we have a set anywhere in the address, then we need to
+ // read that set out in order and actually replace that part of
+ // the address with the real list index. i.e. set.50 might actually
+ // map to set.12 in the config, since it is in list order in the
+ // config, not indexed by set value.
+ for i, v := range schemaList {
+ // Sets are the only thing that cause this issue.
+ if v.Type != TypeSet {
+ continue
+ }
+
+ // If we're at the end of the list, then we don't have to worry
+ // about this because we're just requesting the whole set.
+ if i == len(schemaList)-1 {
+ continue
+ }
+
+ // If we're looking for the count, then ignore...
+ if address[i+1] == "#" {
+ continue
+ }
+
+ indexMap, ok := r.indexMaps[strings.Join(address[:i+1], ".")]
+ if !ok {
+ // Get the set so we can get the index map that tells us the
+ // mapping of the hash code to the list index
+ _, err := r.readSet(address[:i+1], v)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ indexMap = r.indexMaps[strings.Join(address[:i+1], ".")]
+ }
+
+ index, ok := indexMap[address[i+1]]
+ if !ok {
+ return FieldReadResult{}, nil
+ }
+
+ address[i+1] = strconv.FormatInt(int64(index), 10)
+ }
+ }
+
+ k := strings.Join(address, ".")
+ schema := schemaList[len(schemaList)-1]
+
+ // If we're getting the single element of a promoted list, then
+ // check to see if we have a single element we need to promote.
+ if address[len(address)-1] == "0" && len(schemaList) > 1 {
+ lastSchema := schemaList[len(schemaList)-2]
+ if lastSchema.Type == TypeList && lastSchema.PromoteSingle {
+ k := strings.Join(address[:len(address)-1], ".")
+ result, err := r.readPrimitive(k, schema)
+ if err == nil {
+ return result, nil
+ }
+ }
+ }
+
+ switch schema.Type {
+ case TypeBool, TypeFloat, TypeInt, TypeString:
+ return r.readPrimitive(k, schema)
+ case TypeList:
+ // If we support promotion then we first check if we have a lone
+ // value that we must promote.
+ if schema.PromoteSingle {
+ result, err := r.readPrimitive(k, schema.Elem.(*Schema))
+ if err == nil && result.Exists {
+ result.Value = []interface{}{result.Value}
+ return result, nil
+ }
+ }
+
+ return readListField(&nestedConfigFieldReader{r}, address, schema)
+ case TypeMap:
+ return r.readMap(k, schema)
+ case TypeSet:
+ return r.readSet(address, schema)
+ case typeObject:
+ return readObjectField(
+ &nestedConfigFieldReader{r},
+ address, schema.Elem.(map[string]*Schema))
+ default:
+ panic(fmt.Sprintf("Unknown type: %s", schema.Type))
+ }
+}
+
+func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
+ // We want both the raw value and the interpolated. We use the interpolated
+ // to store actual values and we use the raw one to check for
+ // computed keys. Actual values are obtained in the switch, depending on
+ // the type of the raw value.
+ mraw, ok := r.Config.GetRaw(k)
+ if !ok {
+ // check if this is from an interpolated field by seeing if it exists
+ // in the config
+ _, ok := r.Config.Get(k)
+ if !ok {
+ // this really doesn't exist
+ return FieldReadResult{}, nil
+ }
+
+ // We couldn't fetch the value from a nested data structure, so treat the
+ // raw value as an interpolation string. The mraw value is only used
+ // for the type switch below.
+ mraw = "${INTERPOLATED}"
+ }
+
+ result := make(map[string]interface{})
+ computed := false
+ switch m := mraw.(type) {
+ case string:
+ // This is a map which has come out of an interpolated variable, so we
+ // can just get the value directly from config. Values cannot be computed
+ // currently.
+ v, _ := r.Config.Get(k)
+
+ // If this isn't a map[string]interface, it must be computed.
+ mapV, ok := v.(map[string]interface{})
+ if !ok {
+ return FieldReadResult{
+ Exists: true,
+ Computed: true,
+ }, nil
+ }
+
+ // Otherwise we can proceed as usual.
+ for i, iv := range mapV {
+ result[i] = iv
+ }
+ case []interface{}:
+ for i, innerRaw := range m {
+ for ik := range innerRaw.(map[string]interface{}) {
+ key := fmt.Sprintf("%s.%d.%s", k, i, ik)
+ if r.Config.IsComputed(key) {
+ computed = true
+ break
+ }
+
+ v, _ := r.Config.Get(key)
+ result[ik] = v
+ }
+ }
+ case []map[string]interface{}:
+ for i, innerRaw := range m {
+ for ik := range innerRaw {
+ key := fmt.Sprintf("%s.%d.%s", k, i, ik)
+ if r.Config.IsComputed(key) {
+ computed = true
+ break
+ }
+
+ v, _ := r.Config.Get(key)
+ result[ik] = v
+ }
+ }
+ case map[string]interface{}:
+ for ik := range m {
+ key := fmt.Sprintf("%s.%s", k, ik)
+ if r.Config.IsComputed(key) {
+ computed = true
+ break
+ }
+
+ v, _ := r.Config.Get(key)
+ result[ik] = v
+ }
+ default:
+ panic(fmt.Sprintf("unknown type: %#v", mraw))
+ }
+
+ err := mapValuesToPrimitive(result, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+
+ var value interface{}
+ if !computed {
+ value = result
+ }
+
+ return FieldReadResult{
+ Value: value,
+ Exists: true,
+ Computed: computed,
+ }, nil
+}
+
+func (r *ConfigFieldReader) readPrimitive(
+ k string, schema *Schema) (FieldReadResult, error) {
+ raw, ok := r.Config.Get(k)
+ if !ok {
+ // Nothing in config, but we might still have a default from the schema
+ var err error
+ raw, err = schema.DefaultValue()
+ if err != nil {
+ return FieldReadResult{}, fmt.Errorf("%s, error loading default: %s", k, err)
+ }
+
+ if raw == nil {
+ return FieldReadResult{}, nil
+ }
+ }
+
+ var result string
+ if err := mapstructure.WeakDecode(raw, &result); err != nil {
+ return FieldReadResult{}, err
+ }
+
+ computed := r.Config.IsComputed(k)
+ returnVal, err := stringToPrimitive(result, computed, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+
+ return FieldReadResult{
+ Value: returnVal,
+ Exists: true,
+ Computed: computed,
+ }, nil
+}
+
+func (r *ConfigFieldReader) readSet(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ indexMap := make(map[string]int)
+ // Create the set that will be our result
+ set := schema.ZeroValue().(*Set)
+
+ raw, err := readListField(&nestedConfigFieldReader{r}, address, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if !raw.Exists {
+ return FieldReadResult{Value: set}, nil
+ }
+
+ // If the list is computed, the set is necessarily computed
+ if raw.Computed {
+ return FieldReadResult{
+ Value: set,
+ Exists: true,
+ Computed: raw.Computed,
+ }, nil
+ }
+
+ // Build up the set from the list elements
+ for i, v := range raw.Value.([]interface{}) {
+ // Check if any of the keys in this item are computed
+ computed := r.hasComputedSubKeys(
+ fmt.Sprintf("%s.%d", strings.Join(address, "."), i), schema)
+
+ code := set.add(v, computed)
+ indexMap[code] = i
+ }
+
+ r.indexMaps[strings.Join(address, ".")] = indexMap
+
+ return FieldReadResult{
+ Value: set,
+ Exists: true,
+ }, nil
+}
+
+// hasComputedSubKeys walks through a schema and returns whether or not the
+// given key contains any subkeys that are computed.
+func (r *ConfigFieldReader) hasComputedSubKeys(key string, schema *Schema) bool {
+ prefix := key + "."
+
+ switch t := schema.Elem.(type) {
+ case *Resource:
+ for k, schema := range t.Schema {
+ if r.Config.IsComputed(prefix + k) {
+ return true
+ }
+
+ if r.hasComputedSubKeys(prefix+k, schema) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// nestedConfigFieldReader is a funny little thing that just wraps a
+// ConfigFieldReader to call readField when ReadField is called so that
+// we don't recalculate the set rewrites in the address, which leads to
+// an infinite loop.
+type nestedConfigFieldReader struct {
+ Reader *ConfigFieldReader
+}
+
+func (r *nestedConfigFieldReader) ReadField(
+ address []string) (FieldReadResult, error) {
+ return r.Reader.readField(address, true)
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
new file mode 100644
index 00000000..16bbae29
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
@@ -0,0 +1,208 @@
+package schema
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/mitchellh/mapstructure"
+)
+
+// DiffFieldReader reads fields out of a diff structures.
+//
+// It also requires access to a Reader that reads fields from the structure
+// that the diff was derived from. This is usually the state. This is required
+// because a diff on its own doesn't have complete data about full objects
+// such as maps.
+//
+// The Source MUST be the data that the diff was derived from. If it isn't,
+// the behavior of this struct is undefined.
+//
+// Reading fields from a DiffFieldReader is identical to reading from
+// Source except the diff will be applied to the end result.
+//
+// The "Exists" field on the result will be set to true if the complete
+// field exists whether it's from the source, diff, or a combination of both.
+// It cannot be determined whether a retrieved value is composed of
+// diff elements.
+type DiffFieldReader struct {
+ Diff *terraform.InstanceDiff
+ Source FieldReader
+ Schema map[string]*Schema
+}
+
+func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) {
+ schemaList := addrToSchema(address, r.Schema)
+ if len(schemaList) == 0 {
+ return FieldReadResult{}, nil
+ }
+
+ schema := schemaList[len(schemaList)-1]
+ switch schema.Type {
+ case TypeBool, TypeInt, TypeFloat, TypeString:
+ return r.readPrimitive(address, schema)
+ case TypeList:
+ return readListField(r, address, schema)
+ case TypeMap:
+ return r.readMap(address, schema)
+ case TypeSet:
+ return r.readSet(address, schema)
+ case typeObject:
+ return readObjectField(r, address, schema.Elem.(map[string]*Schema))
+ default:
+ panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
+ }
+}
+
+func (r *DiffFieldReader) readMap(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ result := make(map[string]interface{})
+ resultSet := false
+
+ // First read the map from the underlying source
+ source, err := r.Source.ReadField(address)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if source.Exists {
+ result = source.Value.(map[string]interface{})
+ resultSet = true
+ }
+
+ // Next, read all the elements we have in our diff, and apply
+ // the diff to our result.
+ prefix := strings.Join(address, ".") + "."
+ for k, v := range r.Diff.Attributes {
+ if !strings.HasPrefix(k, prefix) {
+ continue
+ }
+ if strings.HasPrefix(k, prefix+"%") {
+ // Ignore the count field
+ continue
+ }
+
+ resultSet = true
+
+ k = k[len(prefix):]
+ if v.NewRemoved {
+ delete(result, k)
+ continue
+ }
+
+ result[k] = v.New
+ }
+
+ err = mapValuesToPrimitive(result, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+
+ var resultVal interface{}
+ if resultSet {
+ resultVal = result
+ }
+
+ return FieldReadResult{
+ Value: resultVal,
+ Exists: resultSet,
+ }, nil
+}
+
+func (r *DiffFieldReader) readPrimitive(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ result, err := r.Source.ReadField(address)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+
+ attrD, ok := r.Diff.Attributes[strings.Join(address, ".")]
+ if !ok {
+ return result, nil
+ }
+
+ var resultVal string
+ if !attrD.NewComputed {
+ resultVal = attrD.New
+ if attrD.NewExtra != nil {
+ result.ValueProcessed = resultVal
+ if err := mapstructure.WeakDecode(attrD.NewExtra, &resultVal); err != nil {
+ return FieldReadResult{}, err
+ }
+ }
+ }
+
+ result.Computed = attrD.NewComputed
+ result.Exists = true
+ result.Value, err = stringToPrimitive(resultVal, false, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+
+ return result, nil
+}
+
+func (r *DiffFieldReader) readSet(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ prefix := strings.Join(address, ".") + "."
+
+ // Create the set that will be our result
+ set := schema.ZeroValue().(*Set)
+
+ // Go through the map and find all the set items
+ for k, d := range r.Diff.Attributes {
+ if d.NewRemoved {
+ // If the field is removed, we always ignore it
+ continue
+ }
+ if !strings.HasPrefix(k, prefix) {
+ continue
+ }
+ if strings.HasSuffix(k, "#") {
+ // Ignore any count field
+ continue
+ }
+
+ // Split the key, since it might be a sub-object like "idx.field"
+ parts := strings.Split(k[len(prefix):], ".")
+ idx := parts[0]
+
+ raw, err := r.ReadField(append(address, idx))
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if !raw.Exists {
+ // This shouldn't happen because we just verified it does exist
+ panic("missing field in set: " + k + "." + idx)
+ }
+
+ set.Add(raw.Value)
+ }
+
+ // Determine if the set "exists". It exists if there are items or if
+ // the diff explicitly wanted it empty.
+ exists := set.Len() > 0
+ if !exists {
+ // We could check if the diff value is "0" here but I think the
+ // existence of "#" on its own is enough to show it existed. This
+ // protects us in the future from the zero value changing from
+ // "0" to "" breaking us (if that were to happen).
+ if _, ok := r.Diff.Attributes[prefix+"#"]; ok {
+ exists = true
+ }
+ }
+
+ if !exists {
+ result, err := r.Source.ReadField(address)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if result.Exists {
+ return result, nil
+ }
+ }
+
+ return FieldReadResult{
+ Value: set,
+ Exists: exists,
+ }, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
new file mode 100644
index 00000000..95339810
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
@@ -0,0 +1,232 @@
+package schema
+
+import (
+ "fmt"
+ "strings"
+)
+
+// MapFieldReader reads fields out of an untyped map[string]string to
+// the best of its ability.
+type MapFieldReader struct {
+ Map MapReader
+ Schema map[string]*Schema
+}
+
+func (r *MapFieldReader) ReadField(address []string) (FieldReadResult, error) {
+ k := strings.Join(address, ".")
+ schemaList := addrToSchema(address, r.Schema)
+ if len(schemaList) == 0 {
+ return FieldReadResult{}, nil
+ }
+
+ schema := schemaList[len(schemaList)-1]
+ switch schema.Type {
+ case TypeBool, TypeInt, TypeFloat, TypeString:
+ return r.readPrimitive(address, schema)
+ case TypeList:
+ return readListField(r, address, schema)
+ case TypeMap:
+ return r.readMap(k, schema)
+ case TypeSet:
+ return r.readSet(address, schema)
+ case typeObject:
+ return readObjectField(r, address, schema.Elem.(map[string]*Schema))
+ default:
+ panic(fmt.Sprintf("Unknown type: %s", schema.Type))
+ }
+}
+
+func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
+ result := make(map[string]interface{})
+ resultSet := false
+
+ // If the name of the map field is directly in the map with an
+ // empty string, it means that the map is being deleted, so mark
+ // that it is set.
+ if v, ok := r.Map.Access(k); ok && v == "" {
+ resultSet = true
+ }
+
+ prefix := k + "."
+ r.Map.Range(func(k, v string) bool {
+ if strings.HasPrefix(k, prefix) {
+ resultSet = true
+
+ key := k[len(prefix):]
+ if key != "%" && key != "#" {
+ result[key] = v
+ }
+ }
+
+ return true
+ })
+
+ err := mapValuesToPrimitive(result, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+
+ var resultVal interface{}
+ if resultSet {
+ resultVal = result
+ }
+
+ return FieldReadResult{
+ Value: resultVal,
+ Exists: resultSet,
+ }, nil
+}
+
+func (r *MapFieldReader) readPrimitive(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ k := strings.Join(address, ".")
+ result, ok := r.Map.Access(k)
+ if !ok {
+ return FieldReadResult{}, nil
+ }
+
+ returnVal, err := stringToPrimitive(result, false, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+
+ return FieldReadResult{
+ Value: returnVal,
+ Exists: true,
+ }, nil
+}
+
+func (r *MapFieldReader) readSet(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ // Get the number of elements in the list
+ countRaw, err := r.readPrimitive(
+ append(address, "#"), &Schema{Type: TypeInt})
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if !countRaw.Exists {
+ // No count, means we have no list
+ countRaw.Value = 0
+ }
+
+ // Create the set that will be our result
+ set := schema.ZeroValue().(*Set)
+
+ // If we have an empty list, then return an empty list
+ if countRaw.Computed || countRaw.Value.(int) == 0 {
+ return FieldReadResult{
+ Value: set,
+ Exists: countRaw.Exists,
+ Computed: countRaw.Computed,
+ }, nil
+ }
+
+ // Go through the map and find all the set items
+ prefix := strings.Join(address, ".") + "."
+ countExpected := countRaw.Value.(int)
+ countActual := make(map[string]struct{})
+ completed := r.Map.Range(func(k, _ string) bool {
+ if !strings.HasPrefix(k, prefix) {
+ return true
+ }
+ if strings.HasPrefix(k, prefix+"#") {
+ // Ignore the count field
+ return true
+ }
+
+ // Split the key, since it might be a sub-object like "idx.field"
+ parts := strings.Split(k[len(prefix):], ".")
+ idx := parts[0]
+
+ var raw FieldReadResult
+ raw, err = r.ReadField(append(address, idx))
+ if err != nil {
+ return false
+ }
+ if !raw.Exists {
+ // This shouldn't happen because we just verified it does exist
+ panic("missing field in set: " + k + "." + idx)
+ }
+
+ set.Add(raw.Value)
+
+ // Due to the way multimap readers work, if we've seen the number
+ // of fields we expect, then exit so that we don't read later values.
+ // For example: the "set" map might have "ports.#", "ports.0", and
+ // "ports.1", but the "state" map might have those plus "ports.2".
+ // We don't want "ports.2"
+ countActual[idx] = struct{}{}
+ if len(countActual) >= countExpected {
+ return false
+ }
+
+ return true
+ })
+ if !completed && err != nil {
+ return FieldReadResult{}, err
+ }
+
+ return FieldReadResult{
+ Value: set,
+ Exists: true,
+ }, nil
+}
+
+// MapReader is an interface that is given to MapFieldReader for accessing
+// a "map". This can be used to have alternate implementations. For a basic
+// map[string]string, use BasicMapReader.
+type MapReader interface {
+ Access(string) (string, bool)
+ Range(func(string, string) bool) bool
+}
+
+// BasicMapReader implements MapReader for a single map.
+type BasicMapReader map[string]string
+
+func (r BasicMapReader) Access(k string) (string, bool) {
+ v, ok := r[k]
+ return v, ok
+}
+
+func (r BasicMapReader) Range(f func(string, string) bool) bool {
+ for k, v := range r {
+ if cont := f(k, v); !cont {
+ return false
+ }
+ }
+
+ return true
+}
+
+// MultiMapReader reads over multiple maps, preferring keys that are
+// found earlier (lower index) over those found later (higher index).
+type MultiMapReader []map[string]string
+
+func (r MultiMapReader) Access(k string) (string, bool) {
+ for _, m := range r {
+ if v, ok := m[k]; ok {
+ return v, ok
+ }
+ }
+
+ return "", false
+}
+
+func (r MultiMapReader) Range(f func(string, string) bool) bool {
+ done := make(map[string]struct{})
+ for _, m := range r {
+ for k, v := range m {
+ if _, ok := done[k]; ok {
+ continue
+ }
+
+ if cont := f(k, v); !cont {
+ return false
+ }
+
+ done[k] = struct{}{}
+ }
+ }
+
+ return true
+}
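+
+// A minimal sketch of reading a flattened list through BasicMapReader
+// (the keys and schema here are illustrative):
+//
+//	r := &MapFieldReader{
+//		Map: BasicMapReader(map[string]string{
+//			"ports.#": "2", "ports.0": "80", "ports.1": "443",
+//		}),
+//		Schema: map[string]*Schema{
+//			"ports": &Schema{Type: TypeList, Elem: &Schema{Type: TypeInt}},
+//		},
+//	}
+//	res, _ := r.ReadField([]string{"ports"})
+//	// res.Value is []interface{}{80, 443}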
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go
new file mode 100644
index 00000000..89ad3a86
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go
@@ -0,0 +1,63 @@
+package schema
+
+import (
+ "fmt"
+)
+
+// MultiLevelFieldReader reads from other field readers,
+// merging their results along the way in a specific order. You can specify
+// "levels" and name them in order to read only an exact level or up to
+// a specific level.
+//
+// This is useful for saying things such as "read the field from the state
+// and config and merge them" or "read the latest value of the field".
+type MultiLevelFieldReader struct {
+ Readers map[string]FieldReader
+ Levels []string
+}
+
+func (r *MultiLevelFieldReader) ReadField(address []string) (FieldReadResult, error) {
+ return r.ReadFieldMerge(address, r.Levels[len(r.Levels)-1])
+}
+
+func (r *MultiLevelFieldReader) ReadFieldExact(
+ address []string, level string) (FieldReadResult, error) {
+ reader, ok := r.Readers[level]
+ if !ok {
+ return FieldReadResult{}, fmt.Errorf(
+ "Unknown reader level: %s", level)
+ }
+
+ result, err := reader.ReadField(address)
+ if err != nil {
+ return FieldReadResult{}, fmt.Errorf(
+ "Error reading level %s: %s", level, err)
+ }
+
+ return result, nil
+}
+
+func (r *MultiLevelFieldReader) ReadFieldMerge(
+ address []string, level string) (FieldReadResult, error) {
+ var result FieldReadResult
+ for _, l := range r.Levels {
+ if r, ok := r.Readers[l]; ok {
+ out, err := r.ReadField(address)
+ if err != nil {
+ return FieldReadResult{}, fmt.Errorf(
+ "Error reading level %s: %s", l, err)
+ }
+
+ // TODO: computed
+ if out.Exists {
+ result = out
+ }
+ }
+
+ if l == level {
+ break
+ }
+ }
+
+ return result, nil
+}
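+
+// An illustrative sketch of merging two levels (stateR and diffR stand in
+// for any FieldReader implementations, e.g. a state and a diff reader):
+//
+//	r := &MultiLevelFieldReader{
+//		Levels:  []string{"state", "diff"},
+//		Readers: map[string]FieldReader{"state": stateR, "diff": diffR},
+//	}
+//	res, err := r.ReadFieldMerge([]string{"name"}, "diff")
+//	// res holds the "diff" value when it exists, else the "state" value.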
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go
new file mode 100644
index 00000000..9abc41b5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go
@@ -0,0 +1,8 @@
+package schema
+
+// FieldWriters are responsible for writing fields by address into
+// a proper typed representation. ResourceData uses this to write new data
+// into existing sources.
+type FieldWriter interface {
+ WriteField([]string, interface{}) error
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
new file mode 100644
index 00000000..689ed8d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
@@ -0,0 +1,319 @@
+package schema
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/mitchellh/mapstructure"
+)
+
+// MapFieldWriter writes data into a single map[string]string structure.
+type MapFieldWriter struct {
+ Schema map[string]*Schema
+
+ lock sync.Mutex
+ result map[string]string
+}
+
+// Map returns the underlying map that is being written to.
+func (w *MapFieldWriter) Map() map[string]string {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ if w.result == nil {
+ w.result = make(map[string]string)
+ }
+
+ return w.result
+}
+
+func (w *MapFieldWriter) unsafeWriteField(addr string, value string) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ if w.result == nil {
+ w.result = make(map[string]string)
+ }
+
+ w.result[addr] = value
+}
+
+func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ if w.result == nil {
+ w.result = make(map[string]string)
+ }
+
+ schemaList := addrToSchema(addr, w.Schema)
+ if len(schemaList) == 0 {
+ return fmt.Errorf("Invalid address to set: %#v", addr)
+ }
+
+ // If we're setting anything other than a list root or set root,
+ // then disallow it.
+ for _, schema := range schemaList[:len(schemaList)-1] {
+ if schema.Type == TypeList {
+ return fmt.Errorf(
+ "%s: can only set full list",
+ strings.Join(addr, "."))
+ }
+
+ if schema.Type == TypeMap {
+ return fmt.Errorf(
+ "%s: can only set full map",
+ strings.Join(addr, "."))
+ }
+
+ if schema.Type == TypeSet {
+ return fmt.Errorf(
+ "%s: can only set full set",
+ strings.Join(addr, "."))
+ }
+ }
+
+ return w.set(addr, value)
+}
+
+func (w *MapFieldWriter) set(addr []string, value interface{}) error {
+ schemaList := addrToSchema(addr, w.Schema)
+ if len(schemaList) == 0 {
+ return fmt.Errorf("Invalid address to set: %#v", addr)
+ }
+
+ schema := schemaList[len(schemaList)-1]
+ switch schema.Type {
+ case TypeBool, TypeInt, TypeFloat, TypeString:
+ return w.setPrimitive(addr, value, schema)
+ case TypeList:
+ return w.setList(addr, value, schema)
+ case TypeMap:
+ return w.setMap(addr, value, schema)
+ case TypeSet:
+ return w.setSet(addr, value, schema)
+ case typeObject:
+ return w.setObject(addr, value, schema)
+ default:
+ panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
+ }
+}
+
+func (w *MapFieldWriter) setList(
+ addr []string,
+ v interface{},
+ schema *Schema) error {
+ k := strings.Join(addr, ".")
+ setElement := func(idx string, value interface{}) error {
+ addrCopy := make([]string, len(addr), len(addr)+1)
+ copy(addrCopy, addr)
+ return w.set(append(addrCopy, idx), value)
+ }
+
+ var vs []interface{}
+ if err := mapstructure.Decode(v, &vs); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+
+ // Set the entire list.
+ var err error
+ for i, elem := range vs {
+ is := strconv.FormatInt(int64(i), 10)
+ err = setElement(is, elem)
+ if err != nil {
+ break
+ }
+ }
+ if err != nil {
+ for i := range vs {
+ is := strconv.FormatInt(int64(i), 10)
+ setElement(is, nil)
+ }
+
+ return err
+ }
+
+ w.result[k+".#"] = strconv.FormatInt(int64(len(vs)), 10)
+ return nil
+}
+
+func (w *MapFieldWriter) setMap(
+ addr []string,
+ value interface{},
+ schema *Schema) error {
+ k := strings.Join(addr, ".")
+ v := reflect.ValueOf(value)
+ vs := make(map[string]interface{})
+
+ if value == nil {
+ // The empty string here means the map is removed.
+ w.result[k] = ""
+ return nil
+ }
+
+ if v.Kind() != reflect.Map {
+ return fmt.Errorf("%s: must be a map", k)
+ }
+ if v.Type().Key().Kind() != reflect.String {
+ return fmt.Errorf("%s: keys must strings", k)
+ }
+ for _, mk := range v.MapKeys() {
+ mv := v.MapIndex(mk)
+ vs[mk.String()] = mv.Interface()
+ }
+
+ // Remove the pure key since we're setting the full map value
+ delete(w.result, k)
+
+ // Set each subkey
+ addrCopy := make([]string, len(addr), len(addr)+1)
+ copy(addrCopy, addr)
+ for subKey, v := range vs {
+ if err := w.set(append(addrCopy, subKey), v); err != nil {
+ return err
+ }
+ }
+
+ // Set the count
+ w.result[k+".%"] = strconv.Itoa(len(vs))
+
+ return nil
+}
+
+func (w *MapFieldWriter) setObject(
+ addr []string,
+ value interface{},
+ schema *Schema) error {
+ // Set the entire object. First decode into a proper structure
+ var v map[string]interface{}
+ if err := mapstructure.Decode(value, &v); err != nil {
+ return fmt.Errorf("%s: %s", strings.Join(addr, "."), err)
+ }
+
+ // Make space for additional elements in the address
+ addrCopy := make([]string, len(addr), len(addr)+1)
+ copy(addrCopy, addr)
+
+ // Set each element in turn
+ var err error
+ for k1, v1 := range v {
+ if err = w.set(append(addrCopy, k1), v1); err != nil {
+ break
+ }
+ }
+ if err != nil {
+ for k1 := range v {
+ w.set(append(addrCopy, k1), nil)
+ }
+ }
+
+ return err
+}
+
+func (w *MapFieldWriter) setPrimitive(
+ addr []string,
+ v interface{},
+ schema *Schema) error {
+ k := strings.Join(addr, ".")
+
+ if v == nil {
+ // The empty string here means the value is removed.
+ w.result[k] = ""
+ return nil
+ }
+
+ var set string
+ switch schema.Type {
+ case TypeBool:
+ var b bool
+ if err := mapstructure.Decode(v, &b); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+
+ set = strconv.FormatBool(b)
+ case TypeString:
+ if err := mapstructure.Decode(v, &set); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+ case TypeInt:
+ var n int
+ if err := mapstructure.Decode(v, &n); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+ set = strconv.FormatInt(int64(n), 10)
+ case TypeFloat:
+ var n float64
+ if err := mapstructure.Decode(v, &n); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+ set = strconv.FormatFloat(n, 'G', -1, 64)
+ default:
+ return fmt.Errorf("Unknown type: %#v", schema.Type)
+ }
+
+ w.result[k] = set
+ return nil
+}
+
+func (w *MapFieldWriter) setSet(
+ addr []string,
+ value interface{},
+ schema *Schema) error {
+ addrCopy := make([]string, len(addr), len(addr)+1)
+ copy(addrCopy, addr)
+ k := strings.Join(addr, ".")
+
+ if value == nil {
+ w.result[k+".#"] = "0"
+ return nil
+ }
+
+ // If it is a slice, then we have to turn it into a *Set so that
+ // we get the proper order back based on the hash code.
+ if v := reflect.ValueOf(value); v.Kind() == reflect.Slice {
+ // Build a temp *ResourceData to use for the conversion
+ tempSchema := *schema
+ tempSchema.Type = TypeList
+ tempSchemaMap := map[string]*Schema{addr[0]: &tempSchema}
+ tempW := &MapFieldWriter{Schema: tempSchemaMap}
+
+ // Set the entire list; this lets us get sane values out of it.
+ if err := tempW.WriteField(addr, value); err != nil {
+ return err
+ }
+
+ // Build the set by going over the list items in order and
+ // hashing them into the set. The reason we go over the list and
+ // not the `value` directly is because this forces all types
+ // to become []interface{} (generic) instead of []string, which
+ // most hash functions are expecting.
+ s := schema.ZeroValue().(*Set)
+ tempR := &MapFieldReader{
+ Map: BasicMapReader(tempW.Map()),
+ Schema: tempSchemaMap,
+ }
+ for i := 0; i < v.Len(); i++ {
+ is := strconv.FormatInt(int64(i), 10)
+ result, err := tempR.ReadField(append(addrCopy, is))
+ if err != nil {
+ return err
+ }
+ if !result.Exists {
+ panic("set item just set doesn't exist")
+ }
+
+ s.Add(result.Value)
+ }
+
+ value = s
+ }
+
+ for code, elem := range value.(*Set).m {
+ if err := w.set(append(addrCopy, code), elem); err != nil {
+ return err
+ }
+ }
+
+ w.result[k+".#"] = strconv.Itoa(value.(*Set).Len())
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
new file mode 100644
index 00000000..3a976293
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
@@ -0,0 +1,36 @@
+// Code generated by "stringer -type=getSource resource_data_get_source.go"; DO NOT EDIT.
+
+package schema
+
+import "fmt"
+
+const (
+ _getSource_name_0 = "getSourceStategetSourceConfig"
+ _getSource_name_1 = "getSourceDiff"
+ _getSource_name_2 = "getSourceSet"
+ _getSource_name_3 = "getSourceLevelMaskgetSourceExact"
+)
+
+var (
+ _getSource_index_0 = [...]uint8{0, 14, 29}
+ _getSource_index_1 = [...]uint8{0, 13}
+ _getSource_index_2 = [...]uint8{0, 12}
+ _getSource_index_3 = [...]uint8{0, 18, 32}
+)
+
+func (i getSource) String() string {
+ switch {
+ case 1 <= i && i <= 2:
+ i -= 1
+ return _getSource_name_0[_getSource_index_0[i]:_getSource_index_0[i+1]]
+ case i == 4:
+ return _getSource_name_1
+ case i == 8:
+ return _getSource_name_2
+ case 15 <= i && i <= 16:
+ i -= 15
+ return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]]
+ default:
+ return fmt.Sprintf("getSource(%d)", i)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
new file mode 100644
index 00000000..d52d2f5f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
@@ -0,0 +1,400 @@
+package schema
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Provider represents a resource provider in Terraform, and properly
+// implements all of the ResourceProvider API.
+//
+// By defining a schema for the configuration of the provider, the
+// map of supporting resources, and a configuration function, the schema
+// framework takes over and handles all the provider operations for you.
+//
+// After defining the provider structure, it is unlikely that you'll require any
+// of the methods on Provider itself.
+type Provider struct {
+ // Schema is the schema for the configuration of this provider. If this
+ // provider has no configuration, this can be omitted.
+ //
+ // The keys of this map are the configuration keys, and the value is
+ // the schema describing the value of the configuration.
+ Schema map[string]*Schema
+
+ // ResourcesMap is the list of available resources that this provider
+ // can manage, along with their Resource structure defining their
+ // own schemas and CRUD operations.
+ //
+ // Provider automatically handles routing operations such as Apply,
+ // Diff, etc. to the proper resource.
+ ResourcesMap map[string]*Resource
+
+ // DataSourcesMap is the collection of available data sources that
+ // this provider implements, with a Resource instance defining
+ // the schema and Read operation of each.
+ //
+ // Resource instances for data sources must have a Read function
+ // and must *not* implement Create, Update or Delete.
+ DataSourcesMap map[string]*Resource
+
+ // ConfigureFunc is a function for configuring the provider. If the
+ // provider doesn't need to be configured, this can be omitted.
+ //
+ // See the ConfigureFunc documentation for more information.
+ ConfigureFunc ConfigureFunc
+
+ // MetaReset is called by TestReset to reset any state stored in the meta
+ // interface. This is especially important if the StopContext is stored by
+ // the provider.
+ MetaReset func() error
+
+ meta interface{}
+
+ // a mutex is required because TestReset can directly replace the stopCtx
+ stopMu sync.Mutex
+ stopCtx context.Context
+ stopCtxCancel context.CancelFunc
+ stopOnce sync.Once
+}
+
+// ConfigureFunc is the function used to configure a Provider.
+//
+// The interface{} value returned by this function is stored and passed into
+// the subsequent resources as the meta parameter. This return value is
+// usually used to pass along a configured API client, a configuration
+// structure, etc.
+type ConfigureFunc func(*ResourceData) (interface{}, error)
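+
+// Example (illustrative sketch): a minimal Provider definition. The
+// exampleServerResource constructor and apiClient type are hypothetical,
+// shown only to demonstrate the wiring.
+//
+//	func exampleProvider() *Provider {
+//		return &Provider{
+//			Schema: map[string]*Schema{
+//				"api_key": {Type: TypeString, Required: true},
+//			},
+//			ResourcesMap: map[string]*Resource{
+//				"example_server": exampleServerResource(),
+//			},
+//			ConfigureFunc: func(d *ResourceData) (interface{}, error) {
+//				// The return value becomes the meta argument
+//				// passed to every resource operation.
+//				return &apiClient{key: d.Get("api_key").(string)}, nil
+//			},
+//		}
+//	}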
+
+// InternalValidate should be called to validate the structure
+// of the provider.
+//
+// This should be called in a unit test for any provider to verify
+// before release that a provider is properly configured for use with
+// this library.
+func (p *Provider) InternalValidate() error {
+ if p == nil {
+ return errors.New("provider is nil")
+ }
+
+ var validationErrors error
+ sm := schemaMap(p.Schema)
+ if err := sm.InternalValidate(sm); err != nil {
+ validationErrors = multierror.Append(validationErrors, err)
+ }
+
+ for k, r := range p.ResourcesMap {
+ if err := r.InternalValidate(nil, true); err != nil {
+ validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err))
+ }
+ }
+
+ for k, r := range p.DataSourcesMap {
+ if err := r.InternalValidate(nil, false); err != nil {
+ validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err))
+ }
+ }
+
+ return validationErrors
+}
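+
+// Example (illustrative): a typical release-gating unit test, assuming the
+// exampleProvider constructor sketched above.
+//
+//	func TestProvider_internalValidate(t *testing.T) {
+//		if err := exampleProvider().InternalValidate(); err != nil {
+//			t.Fatalf("internal validation failed: %s", err)
+//		}
+//	}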
+
+// Meta returns the metadata associated with this provider that was
+// returned by the Configure call. It will be nil until Configure is called.
+func (p *Provider) Meta() interface{} {
+ return p.meta
+}
+
+// SetMeta can be used to forcefully set the Meta object of the provider.
+// Note that if Configure is called the return value will override anything
+// set here.
+func (p *Provider) SetMeta(v interface{}) {
+ p.meta = v
+}
+
+// Stopped reports whether the provider has been stopped or not.
+func (p *Provider) Stopped() bool {
+ ctx := p.StopContext()
+ select {
+ case <-ctx.Done():
+ return true
+ default:
+ return false
+ }
+}
+
+// StopContext returns a context that is canceled once the provider is stopped.
+func (p *Provider) StopContext() context.Context {
+ p.stopOnce.Do(p.stopInit)
+
+ p.stopMu.Lock()
+ defer p.stopMu.Unlock()
+
+ return p.stopCtx
+}
+
+func (p *Provider) stopInit() {
+ p.stopMu.Lock()
+ defer p.stopMu.Unlock()
+
+ p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background())
+}
+
+// Stop implementation of terraform.ResourceProvider interface.
+func (p *Provider) Stop() error {
+ p.stopOnce.Do(p.stopInit)
+
+ p.stopMu.Lock()
+ defer p.stopMu.Unlock()
+
+ p.stopCtxCancel()
+ return nil
+}
+
+// TestReset resets any state stored in the Provider, and will call TestReset
+// on Meta if it implements the TestProvider interface.
+// This may be used to reset the schema.Provider at the start of a test, and is
+// automatically called by resource.Test.
+func (p *Provider) TestReset() error {
+ p.stopInit()
+ if p.MetaReset != nil {
+ return p.MetaReset()
+ }
+ return nil
+}
+
+// Input implementation of terraform.ResourceProvider interface.
+func (p *Provider) Input(
+ input terraform.UIInput,
+ c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+ return schemaMap(p.Schema).Input(input, c)
+}
+
+// Validate implementation of terraform.ResourceProvider interface.
+func (p *Provider) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ if err := p.InternalValidate(); err != nil {
+ return nil, []error{fmt.Errorf(
+ "Internal validation of the provider failed! This is always a bug\n"+
+ "with the provider itself, and not a user issue. Please report\n"+
+ "this bug:\n\n%s", err)}
+ }
+
+ return schemaMap(p.Schema).Validate(c)
+}
+
+// ValidateResource implementation of terraform.ResourceProvider interface.
+func (p *Provider) ValidateResource(
+ t string, c *terraform.ResourceConfig) ([]string, []error) {
+ r, ok := p.ResourcesMap[t]
+ if !ok {
+ return nil, []error{fmt.Errorf(
+ "Provider doesn't support resource: %s", t)}
+ }
+
+ return r.Validate(c)
+}
+
+// Configure implementation of terraform.ResourceProvider interface.
+func (p *Provider) Configure(c *terraform.ResourceConfig) error {
+ // No configuration
+ if p.ConfigureFunc == nil {
+ return nil
+ }
+
+ sm := schemaMap(p.Schema)
+
+ // Get a ResourceData for this configuration. To do this, we actually
+ // generate an intermediary "diff" although that is never exposed.
+ diff, err := sm.Diff(nil, c)
+ if err != nil {
+ return err
+ }
+
+ data, err := sm.Data(nil, diff)
+ if err != nil {
+ return err
+ }
+
+ meta, err := p.ConfigureFunc(data)
+ if err != nil {
+ return err
+ }
+
+ p.meta = meta
+ return nil
+}
+
+// Apply implementation of terraform.ResourceProvider interface.
+func (p *Provider) Apply(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
+ r, ok := p.ResourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown resource type: %s", info.Type)
+ }
+
+ return r.Apply(s, d, p.meta)
+}
+
+// Diff implementation of terraform.ResourceProvider interface.
+func (p *Provider) Diff(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+ r, ok := p.ResourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown resource type: %s", info.Type)
+ }
+
+ return r.Diff(s, c)
+}
+
+// Refresh implementation of terraform.ResourceProvider interface.
+func (p *Provider) Refresh(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState) (*terraform.InstanceState, error) {
+ r, ok := p.ResourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown resource type: %s", info.Type)
+ }
+
+ return r.Refresh(s, p.meta)
+}
+
+// Resources implementation of terraform.ResourceProvider interface.
+func (p *Provider) Resources() []terraform.ResourceType {
+ keys := make([]string, 0, len(p.ResourcesMap))
+ for k := range p.ResourcesMap {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ result := make([]terraform.ResourceType, 0, len(keys))
+ for _, k := range keys {
+ resource := p.ResourcesMap[k]
+
+ // This isn't really possible (it'd fail InternalValidate), but
+ // we guard anyway to avoid a panic.
+ if resource == nil {
+ resource = &Resource{}
+ }
+
+ result = append(result, terraform.ResourceType{
+ Name: k,
+ Importable: resource.Importer != nil,
+ })
+ }
+
+ return result
+}
+
+func (p *Provider) ImportState(
+ info *terraform.InstanceInfo,
+ id string) ([]*terraform.InstanceState, error) {
+ // Find the resource
+ r, ok := p.ResourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown resource type: %s", info.Type)
+ }
+
+ // If it doesn't support import, error
+ if r.Importer == nil {
+ return nil, fmt.Errorf("resource %s doesn't support import", info.Type)
+ }
+
+ // Create the data
+ data := r.Data(nil)
+ data.SetId(id)
+ data.SetType(info.Type)
+
+ // Call the import function
+ results := []*ResourceData{data}
+ if r.Importer.State != nil {
+ var err error
+ results, err = r.Importer.State(data, p.meta)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Convert the results to InstanceState values and return it
+ states := make([]*terraform.InstanceState, len(results))
+ for i, r := range results {
+ states[i] = r.State()
+ }
+
+ // Verify that all are non-nil. If there are any nil the error
+ // isn't obvious so we circumvent that with a friendlier error.
+ for _, s := range states {
+ if s == nil {
+ return nil, fmt.Errorf(
+ "nil entry in ImportState results. This is always a bug with\n" +
+ "the resource that is being imported. Please report this as\n" +
+ "a bug to Terraform.")
+ }
+ }
+
+ return states, nil
+}
+
+// ValidateDataSource implementation of terraform.ResourceProvider interface.
+func (p *Provider) ValidateDataSource(
+ t string, c *terraform.ResourceConfig) ([]string, []error) {
+ r, ok := p.DataSourcesMap[t]
+ if !ok {
+ return nil, []error{fmt.Errorf(
+ "Provider doesn't support data source: %s", t)}
+ }
+
+ return r.Validate(c)
+}
+
+// ReadDataDiff implementation of terraform.ResourceProvider interface.
+func (p *Provider) ReadDataDiff(
+ info *terraform.InstanceInfo,
+ c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+
+ r, ok := p.DataSourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown data source: %s", info.Type)
+ }
+
+ return r.Diff(nil, c)
+}
+
+// ReadDataApply implementation of terraform.ResourceProvider interface.
+func (p *Provider) ReadDataApply(
+ info *terraform.InstanceInfo,
+ d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
+
+ r, ok := p.DataSourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown data source: %s", info.Type)
+ }
+
+ return r.ReadDataApply(d, p.meta)
+}
+
+// DataSources implementation of terraform.ResourceProvider interface.
+func (p *Provider) DataSources() []terraform.DataSource {
+ keys := make([]string, 0, len(p.DataSourcesMap))
+ for k := range p.DataSourcesMap {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ result := make([]terraform.DataSource, 0, len(keys))
+ for _, k := range keys {
+ result = append(result, terraform.DataSource{
+ Name: k,
+ })
+ }
+
+ return result
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
new file mode 100644
index 00000000..c1564a21
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
@@ -0,0 +1,180 @@
+package schema
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Provisioner represents a resource provisioner in Terraform and properly
+// implements all of the ResourceProvisioner API.
+//
+// This higher level structure makes it much easier to implement a new or
+// custom provisioner for Terraform.
+//
+// The function callbacks for this structure are all passed a context object.
+// This context object has a number of pre-defined values that can be accessed
+// via the global functions defined in context.go.
+type Provisioner struct {
+ // ConnSchema is the schema for the connection settings for this
+ // provisioner.
+ //
+ // The keys of this map are the configuration keys, and the value is
+ // the schema describing the value of the configuration.
+ //
+ // NOTE: The value of connection keys can only be strings for now.
+ ConnSchema map[string]*Schema
+
+ // Schema is the schema for the usage of this provisioner.
+ //
+ // The keys of this map are the configuration keys, and the value is
+ // the schema describing the value of the configuration.
+ Schema map[string]*Schema
+
+ // ApplyFunc is the function for executing the provisioner. This is required.
+ // It is given a context. See the Provisioner struct docs for more
+ // information.
+ ApplyFunc func(ctx context.Context) error
+
+ stopCtx context.Context
+ stopCtxCancel context.CancelFunc
+ stopOnce sync.Once
+}
+
+// Keys that can be used to access data in the context parameters for
+// Provisioners.
+var (
+ connDataInvalid = contextKey("data invalid")
+
+ // This returns a *ResourceData for the connection information.
+ // Guaranteed to never be nil.
+ ProvConnDataKey = contextKey("provider conn data")
+
+ // This returns a *ResourceData for the config information.
+ // Guaranteed to never be nil.
+ ProvConfigDataKey = contextKey("provider config data")
+
+ // This returns a terraform.UIOutput. Guaranteed to never be nil.
+ ProvOutputKey = contextKey("provider output")
+
+ // This returns the raw InstanceState passed to Apply. Guaranteed to
+ // be set, but may be nil.
+ ProvRawStateKey = contextKey("provider raw state")
+)
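+
+// Example (illustrative sketch): an ApplyFunc reading the pre-defined context
+// values above. The "command" attribute is assumed to exist in the
+// provisioner's Schema.
+//
+//	ApplyFunc: func(ctx context.Context) error {
+//		data := ctx.Value(ProvConfigDataKey).(*ResourceData)
+//		output := ctx.Value(ProvOutputKey).(terraform.UIOutput)
+//		output.Output(fmt.Sprintf("running: %s", data.Get("command")))
+//		return nil
+//	}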
+
+// InternalValidate should be called to validate the structure
+// of the provisioner.
+//
+// This should be called in a unit test to verify before release that this
+// structure is properly configured for use.
+func (p *Provisioner) InternalValidate() error {
+ if p == nil {
+ return errors.New("provisioner is nil")
+ }
+
+ var validationErrors error
+ {
+ sm := schemaMap(p.ConnSchema)
+ if err := sm.InternalValidate(sm); err != nil {
+ validationErrors = multierror.Append(validationErrors, err)
+ }
+ }
+
+ {
+ sm := schemaMap(p.Schema)
+ if err := sm.InternalValidate(sm); err != nil {
+ validationErrors = multierror.Append(validationErrors, err)
+ }
+ }
+
+ if p.ApplyFunc == nil {
+ validationErrors = multierror.Append(validationErrors, fmt.Errorf(
+ "ApplyFunc must not be nil"))
+ }
+
+ return validationErrors
+}
+
+// StopContext returns a context that checks whether a provisioner is stopped.
+func (p *Provisioner) StopContext() context.Context {
+ p.stopOnce.Do(p.stopInit)
+ return p.stopCtx
+}
+
+func (p *Provisioner) stopInit() {
+ p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background())
+}
+
+// Stop implementation of terraform.ResourceProvisioner interface.
+func (p *Provisioner) Stop() error {
+ p.stopOnce.Do(p.stopInit)
+ p.stopCtxCancel()
+ return nil
+}
+
+func (p *Provisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ return schemaMap(p.Schema).Validate(c)
+}
+
+// Apply implementation of terraform.ResourceProvisioner interface.
+func (p *Provisioner) Apply(
+ o terraform.UIOutput,
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig) error {
+ var connData, configData *ResourceData
+
+ {
+ // We first need to turn the connection information into a
+ // terraform.ResourceConfig so that we can use that type to more
+ // easily build a ResourceData structure. We do this by simply treating
+ // the conn info as configuration input.
+ raw := make(map[string]interface{})
+ if s != nil {
+ for k, v := range s.Ephemeral.ConnInfo {
+ raw[k] = v
+ }
+ }
+
+ c, err := config.NewRawConfig(raw)
+ if err != nil {
+ return err
+ }
+
+ sm := schemaMap(p.ConnSchema)
+ diff, err := sm.Diff(nil, terraform.NewResourceConfig(c))
+ if err != nil {
+ return err
+ }
+ connData, err = sm.Data(nil, diff)
+ if err != nil {
+ return err
+ }
+ }
+
+ {
+ // Build the configuration data. Doing this requires making a "diff"
+ // even though that's never used. We use that just to get the correct types.
+ configMap := schemaMap(p.Schema)
+ diff, err := configMap.Diff(nil, c)
+ if err != nil {
+ return err
+ }
+ configData, err = configMap.Data(nil, diff)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Build the context and call the function
+ ctx := p.StopContext()
+ ctx = context.WithValue(ctx, ProvConnDataKey, connData)
+ ctx = context.WithValue(ctx, ProvConfigDataKey, configData)
+ ctx = context.WithValue(ctx, ProvOutputKey, o)
+ ctx = context.WithValue(ctx, ProvRawStateKey, s)
+ return p.ApplyFunc(ctx)
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
new file mode 100644
index 00000000..c8105588
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
@@ -0,0 +1,478 @@
+package schema
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "strconv"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Resource represents a thing in Terraform that has a set of configurable
+// attributes and a lifecycle (create, read, update, delete).
+//
+// The Resource schema is an abstraction that allows provider writers to
+// worry only about CRUD operations while off-loading validation, diff
+// generation, etc. to this higher level library.
+//
+// In spite of the name, this struct is not used only for terraform resources,
+// but also for data sources. In the case of data sources, the Create,
+// Update and Delete functions must not be provided.
+type Resource struct {
+ // Schema is the schema for the configuration of this resource.
+ //
+ // The keys of this map are the configuration keys, and the values
+ // describe the schema of the configuration value.
+ //
+ // The schema is used to represent both configurable data as well
+ // as data that might be computed in the process of creating this
+ // resource.
+ Schema map[string]*Schema
+
+ // SchemaVersion is the version number for this resource's Schema
+ // definition. The current SchemaVersion is stored in the state for each
+ // resource. Provider authors can increment this version number
+ // when Schema semantics change. If the State's SchemaVersion is less than
+ // the current SchemaVersion, the InstanceState is yielded to the
+ // MigrateState callback, where the provider can make whatever changes it
+ // needs to update the state to be compatible to the latest version of the
+ // Schema.
+ //
+ // When unset, SchemaVersion defaults to 0, so provider authors can start
+ // their versioning at any integer >= 1.
+ SchemaVersion int
+
+ // MigrateState is responsible for updating an InstanceState with an old
+ // version to the format expected by the current version of the Schema.
+ //
+ // It is called during Refresh if the State's stored SchemaVersion is less
+ // than the current SchemaVersion of the Resource.
+ //
+ // The function is yielded the state's stored SchemaVersion and a pointer to
+ // the InstanceState that needs updating, as well as the configured
+ // provider's configured meta interface{}, in case the migration process
+ // needs to make any remote API calls.
+ MigrateState StateMigrateFunc
+
+ // The functions below are the CRUD operations for this resource.
+ //
+ // The only optional operation is Update. If Update is not implemented,
+ // then updates will not be supported for this resource.
+ //
+ // The ResourceData parameter in the functions below are used to
+ // query configuration and changes for the resource as well as to set
+ // the ID, computed data, etc.
+ //
+ // The interface{} parameter is the result of the ConfigureFunc in
+ // the provider for this resource. If the provider does not define
+ // a ConfigureFunc, this will be nil. This parameter should be used
+ // to store API clients, configuration structures, etc.
+ //
+ // If any errors occur during an operation, an error should be
+ // returned. If a resource was partially updated, be careful to enable
+ // partial state mode for ResourceData and use it accordingly.
+ //
+ // Exists is a function that is called to check if a resource still
+ // exists. If this returns false, then this will affect the diff
+ // accordingly. If this function isn't set, it will not be called. It
+ // is highly recommended to set it. The *ResourceData passed to Exists
+ // should _not_ be modified.
+ Create CreateFunc
+ Read ReadFunc
+ Update UpdateFunc
+ Delete DeleteFunc
+ Exists ExistsFunc
+
+ // Importer is the ResourceImporter implementation for this resource.
+ // If this is nil, then this resource does not support importing. If
+ // this is non-nil, then it supports importing and ResourceImporter
+ // must be validated. The validity of ResourceImporter is verified
+ // by InternalValidate on Resource.
+ Importer *ResourceImporter
+
+ // If non-empty, this string is emitted as a warning during Validate.
+ // This is a private interface for now, for use by DataSourceResourceShim,
+ // and not for general use. (But maybe later...)
+ deprecationMessage string
+
+ // Timeouts allow users to specify time durations after which an operation
+ // should time out, letting them extend an action to suit their usage. For
+ // example, a user may specify a large create timeout for an AWS RDS instance
+ // due to its size, or because it is restoring from a snapshot.
+ // Resource implementors must enable Timeout support by adding the allowed
+ // actions (Create, Read, Update, Delete, Default) to the Resource struct, and
+ // by accessing them in the matching CRUD methods.
+ Timeouts *ResourceTimeout
+}
+
+// See Resource documentation.
+type CreateFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type ReadFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type UpdateFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type DeleteFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type ExistsFunc func(*ResourceData, interface{}) (bool, error)
+
+// See Resource documentation.
+type StateMigrateFunc func(
+ int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error)
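+
+// Example (illustrative sketch, with assumed names): a minimal writable
+// resource. With no Update function, every non-Computed attribute must be
+// ForceNew, as enforced by InternalValidate.
+//
+//	func exampleServerResource() *Resource {
+//		return &Resource{
+//			Schema: map[string]*Schema{
+//				"name": {Type: TypeString, Required: true, ForceNew: true},
+//			},
+//			Create: func(d *ResourceData, meta interface{}) error {
+//				d.SetId(d.Get("name").(string))
+//				return nil
+//			},
+//			Read:   Noop,
+//			Delete: RemoveFromState,
+//		}
+//	}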
+
+// Apply creates, updates, and/or deletes a resource.
+func (r *Resource) Apply(
+ s *terraform.InstanceState,
+ d *terraform.InstanceDiff,
+ meta interface{}) (*terraform.InstanceState, error) {
+ data, err := schemaMap(r.Schema).Data(s, d)
+ if err != nil {
+ return s, err
+ }
+
+ // Instance Diff should have the timeout info; we need to copy it over to the
+ // ResourceData meta
+ rt := ResourceTimeout{}
+ if _, ok := d.Meta[TimeoutKey]; ok {
+ if err := rt.DiffDecode(d); err != nil {
+ log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
+ }
+ } else {
+ log.Printf("[DEBUG] No meta timeoutkey found in Apply()")
+ }
+ data.timeouts = &rt
+
+ if s == nil {
+ // The Terraform API dictates that this should never happen, but
+ // it doesn't hurt to be safe in this case.
+ s = new(terraform.InstanceState)
+ }
+
+ if d.Destroy || d.RequiresNew() {
+ if s.ID != "" {
+ // Destroy the resource since it is created
+ if err := r.Delete(data, meta); err != nil {
+ return r.recordCurrentSchemaVersion(data.State()), err
+ }
+
+ // Make sure the ID is gone.
+ data.SetId("")
+ }
+
+ // If we're only destroying, and not creating, then return
+ // now since we're done!
+ if !d.RequiresNew() {
+ return nil, nil
+ }
+
+ // Reset the data to be stateless since we just destroyed
+ data, err = schemaMap(r.Schema).Data(nil, d)
+ if err != nil {
+ return nil, err
+ }
+ // data was reset, need to re-apply the parsed timeouts
+ data.timeouts = &rt
+ }
+
+ err = nil
+ if data.Id() == "" {
+ // We're creating, it is a new resource.
+ data.MarkNewResource()
+ err = r.Create(data, meta)
+ } else {
+ if r.Update == nil {
+ return s, fmt.Errorf("doesn't support update")
+ }
+
+ err = r.Update(data, meta)
+ }
+
+ return r.recordCurrentSchemaVersion(data.State()), err
+}
+
+// Diff returns a diff of this resource and is API compatible with the
+// ResourceProvider interface.
+func (r *Resource) Diff(
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+
+ t := &ResourceTimeout{}
+ err := t.ConfigDecode(r, c)
+
+ if err != nil {
+ return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err)
+ }
+
+ instanceDiff, err := schemaMap(r.Schema).Diff(s, c)
+ if err != nil {
+ return instanceDiff, err
+ }
+
+ if instanceDiff != nil {
+ if err := t.DiffEncode(instanceDiff); err != nil {
+ log.Printf("[ERR] Error encoding timeout to instance diff: %s", err)
+ }
+ } else {
+ log.Printf("[DEBUG] Instance Diff is nil in Diff()")
+ }
+
+ return instanceDiff, err
+}
+
+// Validate validates the resource configuration against the schema.
+func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ warns, errs := schemaMap(r.Schema).Validate(c)
+
+ if r.deprecationMessage != "" {
+ warns = append(warns, r.deprecationMessage)
+ }
+
+ return warns, errs
+}
+
+// ReadDataApply loads the data for a data source, given a diff that
+// describes the configuration arguments and desired computed attributes.
+func (r *Resource) ReadDataApply(
+ d *terraform.InstanceDiff,
+ meta interface{},
+) (*terraform.InstanceState, error) {
+
+ // Data sources are always built completely from scratch
+ // on each read, so the source state is always nil.
+ data, err := schemaMap(r.Schema).Data(nil, d)
+ if err != nil {
+ return nil, err
+ }
+
+ err = r.Read(data, meta)
+ state := data.State()
+ if state != nil && state.ID == "" {
+ // Data sources can set an ID if they want, but they aren't
+ // required to; we'll provide a placeholder if they don't,
+ // to preserve the invariant that all resources have non-empty
+ // ids.
+ state.ID = "-"
+ }
+
+ return r.recordCurrentSchemaVersion(state), err
+}
+
+// Refresh refreshes the state of the resource.
+func (r *Resource) Refresh(
+ s *terraform.InstanceState,
+ meta interface{}) (*terraform.InstanceState, error) {
+ // If the ID is already somehow blank, it doesn't exist
+ if s.ID == "" {
+ return nil, nil
+ }
+
+ rt := ResourceTimeout{}
+ if _, ok := s.Meta[TimeoutKey]; ok {
+ if err := rt.StateDecode(s); err != nil {
+ log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
+ }
+ }
+
+ if r.Exists != nil {
+ // Make a copy of data so that if it is modified it doesn't
+ // affect our Read later.
+ data, err := schemaMap(r.Schema).Data(s, nil)
+ if err != nil {
+ return s, err
+ }
+ data.timeouts = &rt
+
+ exists, err := r.Exists(data, meta)
+ if err != nil {
+ return s, err
+ }
+ if !exists {
+ return nil, nil
+ }
+ }
+
+ needsMigration, stateSchemaVersion := r.checkSchemaVersion(s)
+ if needsMigration && r.MigrateState != nil {
+ s, err := r.MigrateState(stateSchemaVersion, s, meta)
+ if err != nil {
+ return s, err
+ }
+ }
+
+ data, err := schemaMap(r.Schema).Data(s, nil)
+ if err != nil {
+ return s, err
+ }
+ data.timeouts = &rt
+
+ err = r.Read(data, meta)
+ state := data.State()
+ if state != nil && state.ID == "" {
+ state = nil
+ }
+
+ return r.recordCurrentSchemaVersion(state), err
+}
+
+// InternalValidate should be called to validate the structure
+// of the resource.
+//
+// This should be called in a unit test for any resource to verify
+// before release that a resource is properly configured for use with
+// this library.
+//
+// Provider.InternalValidate() will automatically call this for all of
+// the resources it manages, so you don't need to call this manually if it
+// is part of a Provider.
+func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error {
+ if r == nil {
+ return errors.New("resource is nil")
+ }
+
+ if !writable {
+ if r.Create != nil || r.Update != nil || r.Delete != nil {
+ return fmt.Errorf("must not implement Create, Update or Delete")
+ }
+ }
+
+ tsm := topSchemaMap
+
+ if r.isTopLevel() && writable {
+ // All non-Computed attributes must be ForceNew if Update is not defined
+ if r.Update == nil {
+ nonForceNewAttrs := make([]string, 0)
+ for k, v := range r.Schema {
+ if !v.ForceNew && !v.Computed {
+ nonForceNewAttrs = append(nonForceNewAttrs, k)
+ }
+ }
+ if len(nonForceNewAttrs) > 0 {
+ return fmt.Errorf(
+ "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs)
+ }
+ } else {
+ nonUpdateableAttrs := make([]string, 0)
+ for k, v := range r.Schema {
+ if v.ForceNew || (v.Computed && !v.Optional) {
+ nonUpdateableAttrs = append(nonUpdateableAttrs, k)
+ }
+ }
+ updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs)
+ if updateableAttrs == 0 {
+ return fmt.Errorf(
+ "All fields are ForceNew or Computed w/out Optional, Update is superfluous")
+ }
+ }
+
+ tsm = schemaMap(r.Schema)
+
+ // Delete and Read are required
+ if r.Read == nil {
+ return fmt.Errorf("Read must be implemented")
+ }
+ if r.Delete == nil {
+ return fmt.Errorf("Delete must be implemented")
+ }
+
+ // If we have an importer, we need to verify the importer.
+ if r.Importer != nil {
+ if err := r.Importer.InternalValidate(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return schemaMap(r.Schema).InternalValidate(tsm)
+}
+
+// Data returns a ResourceData struct for this Resource. Each return value
+// is a separate copy and can be safely modified differently.
+//
+// The data returned from this function has no actual effect on the Resource
+// itself (including the state given to this function).
+//
+// This function is useful for unit tests and ResourceImporter functions.
+func (r *Resource) Data(s *terraform.InstanceState) *ResourceData {
+ result, err := schemaMap(r.Schema).Data(s, nil)
+ if err != nil {
+ // At the time of writing, this isn't possible (Data never returns
+ // non-nil errors). We panic to find this in the future if we have to.
+ // I don't see a reason for Data to ever return an error.
+ panic(err)
+ }
+
+ // Set the schema version to latest by default
+ result.meta = map[string]interface{}{
+ "schema_version": strconv.Itoa(r.SchemaVersion),
+ }
+
+ return result
+}
+
+// TestResourceData yields a ResourceData filled with this resource's schema for use in unit testing.
+//
+// TODO: May be able to be removed with the above ResourceData function.
+func (r *Resource) TestResourceData() *ResourceData {
+ return &ResourceData{
+ schema: r.Schema,
+ }
+}
+
+// Returns true if the resource is "top level" i.e. not a sub-resource.
+func (r *Resource) isTopLevel() bool {
+ // TODO: This is a heuristic; replace with a definitive attribute?
+ return r.Create != nil
+}
+
+// Determines if a given InstanceState needs to be migrated by checking the
+// stored version number with the current SchemaVersion
+func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) {
+ // Get the raw interface{} value for the schema version. If it doesn't
+ // exist or is nil then set it to zero.
+ raw := is.Meta["schema_version"]
+ if raw == nil {
+ raw = "0"
+ }
+
+ // Try to convert it to a string. If it isn't a string then we pretend
+ // that it isn't set at all. It should never not be a string unless it
+ // was manually tampered with.
+ rawString, ok := raw.(string)
+ if !ok {
+ rawString = "0"
+ }
+
+ stateSchemaVersion, _ := strconv.Atoi(rawString)
+ return stateSchemaVersion < r.SchemaVersion, stateSchemaVersion
+}
+
+func (r *Resource) recordCurrentSchemaVersion(
+ state *terraform.InstanceState) *terraform.InstanceState {
+ if state != nil && r.SchemaVersion > 0 {
+ if state.Meta == nil {
+ state.Meta = make(map[string]interface{})
+ }
+ state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion)
+ }
+ return state
+}
+
+// Noop is a convenience implementation of a resource function which takes
+// no action and returns no error.
+func Noop(*ResourceData, interface{}) error {
+ return nil
+}
+
+// RemoveFromState is a convenience implementation of a resource function
+// which sets the resource ID to empty string (to remove it from state)
+// and returns no error.
+func RemoveFromState(d *ResourceData, _ interface{}) error {
+ d.SetId("")
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
new file mode 100644
index 00000000..b2bc8f6c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
@@ -0,0 +1,502 @@
+package schema
+
+import (
+ "log"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// ResourceData is used to query and set the attributes of a resource.
+//
+// ResourceData is the primary argument received for CRUD operations on
+// a resource as well as configuration of a provider. It is a powerful
+// structure that can be used to not only query data, but check for changes,
+// define partial state updates, etc.
+//
+// The most relevant methods to take a look at are Get, Set, and Partial.
+type ResourceData struct {
+ // Settable (internally)
+ schema map[string]*Schema
+ config *terraform.ResourceConfig
+ state *terraform.InstanceState
+ diff *terraform.InstanceDiff
+ meta map[string]interface{}
+ timeouts *ResourceTimeout
+
+ // Don't set
+ multiReader *MultiLevelFieldReader
+ setWriter *MapFieldWriter
+ newState *terraform.InstanceState
+ partial bool
+ partialMap map[string]struct{}
+ once sync.Once
+ isNew bool
+}
+
+// getResult is the internal structure that is generated when a Get
+// is called that contains some extra data that might be used.
+type getResult struct {
+ Value interface{}
+ ValueProcessed interface{}
+ Computed bool
+ Exists bool
+ Schema *Schema
+}
+
+// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary
+// values, bypassing schema. This MUST NOT be used in normal circumstances -
+// it exists only to support the remote_state data source.
+func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) {
+ d.once.Do(d.init)
+
+ d.setWriter.unsafeWriteField(key, value)
+}
+
+// Get returns the data for the given key, or nil if the key doesn't exist
+// in the schema.
+//
+// If the key does exist in the schema but doesn't exist in the configuration,
+// then the default value for that type will be returned. For strings, this is
+// "", for numbers it is 0, etc.
+//
+// If you want to test if something is set at all in the configuration,
+// use GetOk.
+func (d *ResourceData) Get(key string) interface{} {
+ v, _ := d.GetOk(key)
+ return v
+}
+
+// GetChange returns the old and new value for a given key.
+//
+// HasChange should be used to check if a change exists. It is possible
+// that both the old and new value are the same if the old value was not
+// set and the new value is. This is common, for example, for boolean
+// fields which have a zero value of false.
+func (d *ResourceData) GetChange(key string) (interface{}, interface{}) {
+ o, n := d.getChange(key, getSourceState, getSourceDiff)
+ return o.Value, n.Value
+}
+
+// GetOk returns the data for the given key and whether or not the key
+// has been set to a non-zero value at some point.
+//
+// The first result will not necessarily be nil if the value doesn't exist.
+// The second result should be checked to determine this information.
+func (d *ResourceData) GetOk(key string) (interface{}, bool) {
+ r := d.getRaw(key, getSourceSet)
+ exists := r.Exists && !r.Computed
+ if exists {
+ // If it exists, we also want to verify it is not the zero-value.
+ value := r.Value
+ zero := r.Schema.Type.Zero()
+
+ if eq, ok := value.(Equal); ok {
+ exists = !eq.Equal(zero)
+ } else {
+ exists = !reflect.DeepEqual(value, zero)
+ }
+ }
+
+ return r.Value, exists
+}
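+
+// Example (illustrative): GetOk distinguishes "explicitly configured" from
+// "zero value", which Get alone cannot do. The req struct is assumed.
+//
+//	if v, ok := d.GetOk("description"); ok {
+//		// only forward the value when the user actually set it
+//		req.Description = v.(string)
+//	}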
+
+func (d *ResourceData) getRaw(key string, level getSource) getResult {
+ var parts []string
+ if key != "" {
+ parts = strings.Split(key, ".")
+ }
+
+ return d.get(parts, level)
+}
+
+// HasChange returns whether or not the given key has been changed.
+func (d *ResourceData) HasChange(key string) bool {
+ o, n := d.GetChange(key)
+
+ // If the type implements the Equal interface, then call that
+ // instead of just doing a reflect.DeepEqual. An example where this is
+ // needed is *Set
+ if eq, ok := o.(Equal); ok {
+ return !eq.Equal(n)
+ }
+
+ return !reflect.DeepEqual(o, n)
+}
+
+// Partial turns partial state mode on/off.
+//
+// When partial state mode is enabled, then only key prefixes specified
+// by SetPartial will be in the final state. This allows providers to return
+// partial states for partially applied resources (when errors occur).
+func (d *ResourceData) Partial(on bool) {
+ d.partial = on
+ if on {
+ if d.partialMap == nil {
+ d.partialMap = make(map[string]struct{})
+ }
+ } else {
+ d.partialMap = nil
+ }
+}
+
+// Set sets the value for the given key.
+//
+// If the key is invalid or the value is not a correct type, an error
+// will be returned.
+func (d *ResourceData) Set(key string, value interface{}) error {
+ d.once.Do(d.init)
+
+ // If the value is a pointer to a non-struct, get its value and
+ // use that. This allows Set to take a pointer to primitives to
+ // simplify the interface.
+ reflectVal := reflect.ValueOf(value)
+ if reflectVal.Kind() == reflect.Ptr {
+ if reflectVal.IsNil() {
+ // If the pointer is nil, then the value is just nil
+ value = nil
+ } else {
+ // Otherwise, we dereference the pointer as long as its not
+ // a pointer to a struct, since struct pointers are allowed.
+ reflectVal = reflect.Indirect(reflectVal)
+ if reflectVal.Kind() != reflect.Struct {
+ value = reflectVal.Interface()
+ }
+ }
+ }
+
+ return d.setWriter.WriteField(strings.Split(key, "."), value)
+}
+
+// SetPartial adds the key to the final state output while
+// in partial state mode. The key must be a root key in the schema (i.e.
+// it cannot be "list.0").
+//
+// If partial state mode is disabled, then this has no effect. Additionally,
+// whenever partial state mode is toggled, the partial data is cleared.
+func (d *ResourceData) SetPartial(k string) {
+ if d.partial {
+ d.partialMap[k] = struct{}{}
+ }
+}
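+
+// Example (illustrative sketch): committing only the attributes whose remote
+// updates succeeded. The updateName helper is hypothetical.
+//
+//	d.Partial(true)
+//	if err := updateName(d, meta); err != nil {
+//		return err // only keys marked via SetPartial reach the state
+//	}
+//	d.SetPartial("name")
+//	// ... further steps ...
+//	d.Partial(false) // from here on, all attributes are recorded as usual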
+
+func (d *ResourceData) MarkNewResource() {
+ d.isNew = true
+}
+
+func (d *ResourceData) IsNewResource() bool {
+ return d.isNew
+}
+
+// Id returns the ID of the resource.
+func (d *ResourceData) Id() string {
+ var result string
+
+ if d.state != nil {
+ result = d.state.ID
+ }
+
+ if d.newState != nil {
+ result = d.newState.ID
+ }
+
+ return result
+}
+
+// ConnInfo returns the connection info for this resource.
+func (d *ResourceData) ConnInfo() map[string]string {
+ if d.newState != nil {
+ return d.newState.Ephemeral.ConnInfo
+ }
+
+ if d.state != nil {
+ return d.state.Ephemeral.ConnInfo
+ }
+
+ return nil
+}
+
+// SetId sets the ID of the resource. If the value is blank, then the
+// resource is destroyed.
+func (d *ResourceData) SetId(v string) {
+ d.once.Do(d.init)
+ d.newState.ID = v
+}
+
+// SetConnInfo sets the connection info for a resource.
+func (d *ResourceData) SetConnInfo(v map[string]string) {
+ d.once.Do(d.init)
+ d.newState.Ephemeral.ConnInfo = v
+}
+
+// SetType sets the ephemeral type for the data. This is only required
+// for importing.
+func (d *ResourceData) SetType(t string) {
+ d.once.Do(d.init)
+ d.newState.Ephemeral.Type = t
+}
+
+// State returns the new InstanceState after the diff and any Set
+// calls.
+func (d *ResourceData) State() *terraform.InstanceState {
+ var result terraform.InstanceState
+ result.ID = d.Id()
+ result.Meta = d.meta
+
+ // If we have no ID, then this resource doesn't exist and we just
+ // return nil.
+ if result.ID == "" {
+ return nil
+ }
+
+ if d.timeouts != nil {
+ if err := d.timeouts.StateEncode(&result); err != nil {
+ log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err)
+ }
+ }
+
+ // Look for a magic key in the schema that determines we skip the
+ // integrity check of fields existing in the schema, allowing dynamic
+ // keys to be created.
+ hasDynamicAttributes := false
+ for k := range d.schema {
+ if k == "__has_dynamic_attributes" {
+ hasDynamicAttributes = true
+ log.Printf("[INFO] Resource %s has dynamic attributes", result.ID)
+ }
+ }
+
+ // In order to build the final state attributes, we read the full
+ // attribute set as a map[string]interface{}, write it to a MapFieldWriter,
+ // and then use that map.
+ rawMap := make(map[string]interface{})
+ for k := range d.schema {
+ source := getSourceSet
+ if d.partial {
+ source = getSourceState
+ if _, ok := d.partialMap[k]; ok {
+ source = getSourceSet
+ }
+ }
+
+ raw := d.get([]string{k}, source)
+ if raw.Exists && !raw.Computed {
+ rawMap[k] = raw.Value
+ if raw.ValueProcessed != nil {
+ rawMap[k] = raw.ValueProcessed
+ }
+ }
+ }
+
+ mapW := &MapFieldWriter{Schema: d.schema}
+ if err := mapW.WriteField(nil, rawMap); err != nil {
+ return nil
+ }
+
+ result.Attributes = mapW.Map()
+
+ if hasDynamicAttributes {
+ // If we have dynamic attributes, just copy the attributes map
+ // one for one into the result attributes.
+ for k, v := range d.setWriter.Map() {
+ // Don't clobber schema values. This limits usage of dynamic
+ // attributes to names which _do not_ conflict with schema
+ // keys!
+ if _, ok := result.Attributes[k]; !ok {
+ result.Attributes[k] = v
+ }
+ }
+ }
+
+ if d.newState != nil {
+ result.Ephemeral = d.newState.Ephemeral
+ }
+
+ // TODO: This is hacky and we can remove this when we have a proper
+ // state writer. We should instead have a proper StateFieldWriter
+ // and use that.
+ for k, schema := range d.schema {
+ if schema.Type != TypeMap {
+ continue
+ }
+
+ if result.Attributes[k] == "" {
+ delete(result.Attributes, k)
+ }
+ }
+
+ if v := d.Id(); v != "" {
+ result.Attributes["id"] = d.Id()
+ }
+
+ if d.state != nil {
+ result.Tainted = d.state.Tainted
+ }
+
+ return &result
+}
+
+// Timeout returns the duration configured for the given timeout key.
+// It returns a default of 20 minutes for any key that is not set and has no
+// configured Default.
+func (d *ResourceData) Timeout(key string) time.Duration {
+ key = strings.ToLower(key)
+
+ var timeout *time.Duration
+ switch key {
+ case TimeoutCreate:
+ timeout = d.timeouts.Create
+ case TimeoutRead:
+ timeout = d.timeouts.Read
+ case TimeoutUpdate:
+ timeout = d.timeouts.Update
+ case TimeoutDelete:
+ timeout = d.timeouts.Delete
+ }
+
+ if timeout != nil {
+ return *timeout
+ }
+
+ if d.timeouts.Default != nil {
+ return *d.timeouts.Default
+ }
+
+ // Return system default of 20 minutes
+ return 20 * time.Minute
+}
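+
+// Example (illustrative): consuming the configured timeout inside a Create
+// function; the waitForReady helper is hypothetical.
+//
+//	if err := waitForReady(d.Id(), d.Timeout(TimeoutCreate)); err != nil {
+//		return err
+//	}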
+
+func (d *ResourceData) init() {
+ // Initialize the field that will store our new state
+ var copyState terraform.InstanceState
+ if d.state != nil {
+ copyState = *d.state.DeepCopy()
+ }
+ d.newState = &copyState
+
+ // Initialize the map for storing set data
+ d.setWriter = &MapFieldWriter{Schema: d.schema}
+
+ // Initialize the reader for getting data from the
+ // underlying sources (config, diff, etc.)
+ readers := make(map[string]FieldReader)
+ var stateAttributes map[string]string
+ if d.state != nil {
+ stateAttributes = d.state.Attributes
+ readers["state"] = &MapFieldReader{
+ Schema: d.schema,
+ Map: BasicMapReader(stateAttributes),
+ }
+ }
+ if d.config != nil {
+ readers["config"] = &ConfigFieldReader{
+ Schema: d.schema,
+ Config: d.config,
+ }
+ }
+ if d.diff != nil {
+ readers["diff"] = &DiffFieldReader{
+ Schema: d.schema,
+ Diff: d.diff,
+ Source: &MultiLevelFieldReader{
+ Levels: []string{"state", "config"},
+ Readers: readers,
+ },
+ }
+ }
+ readers["set"] = &MapFieldReader{
+ Schema: d.schema,
+ Map: BasicMapReader(d.setWriter.Map()),
+ }
+ d.multiReader = &MultiLevelFieldReader{
+ Levels: []string{
+ "state",
+ "config",
+ "diff",
+ "set",
+ },
+
+ Readers: readers,
+ }
+}
+
+func (d *ResourceData) diffChange(
+ k string) (interface{}, interface{}, bool, bool) {
+ // Get the change between the state and the config.
+ o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact)
+ if !o.Exists {
+ o.Value = nil
+ }
+ if !n.Exists {
+ n.Value = nil
+ }
+
+ // Return the old, new, and whether there is a change
+ return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed
+}
+
+func (d *ResourceData) getChange(
+ k string,
+ oldLevel getSource,
+ newLevel getSource) (getResult, getResult) {
+ var parts, parts2 []string
+ if k != "" {
+ parts = strings.Split(k, ".")
+ parts2 = strings.Split(k, ".")
+ }
+
+ o := d.get(parts, oldLevel)
+ n := d.get(parts2, newLevel)
+ return o, n
+}
+
+func (d *ResourceData) get(addr []string, source getSource) getResult {
+ d.once.Do(d.init)
+
+ level := "set"
+ flags := source & ^getSourceLevelMask
+ exact := flags&getSourceExact != 0
+ source = source & getSourceLevelMask
+ if source >= getSourceSet {
+ level = "set"
+ } else if source >= getSourceDiff {
+ level = "diff"
+ } else if source >= getSourceConfig {
+ level = "config"
+ } else {
+ level = "state"
+ }
+
+ var result FieldReadResult
+ var err error
+ if exact {
+ result, err = d.multiReader.ReadFieldExact(addr, level)
+ } else {
+ result, err = d.multiReader.ReadFieldMerge(addr, level)
+ }
+ if err != nil {
+ panic(err)
+ }
+
+ // If the result doesn't exist, then we set the value to the zero value
+ var schema *Schema
+ if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 {
+ schema = schemaL[len(schemaL)-1]
+ }
+
+ if result.Value == nil && schema != nil {
+ result.Value = result.ValueOrZero(schema)
+ }
+
+ // Transform the FieldReadResult into a getResult. It might be worth
+ // merging these two structures one day.
+ return getResult{
+ Value: result.Value,
+ ValueProcessed: result.ValueProcessed,
+ Computed: result.Computed,
+ Exists: result.Exists,
+ Schema: schema,
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go
new file mode 100644
index 00000000..7dd655de
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go
@@ -0,0 +1,17 @@
+package schema
+
+//go:generate stringer -type=getSource resource_data_get_source.go
+
+// getSource represents the level we want to get for a value (internally).
+// Any source less than or equal to the level will be loaded (whichever
+// has a value first).
+type getSource byte
+
+const (
+ getSourceState getSource = 1 << iota
+ getSourceConfig
+ getSourceDiff
+ getSourceSet
+ getSourceExact // Only get from the _exact_ level
+ getSourceLevelMask getSource = getSourceState | getSourceConfig | getSourceDiff | getSourceSet
+)
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go
new file mode 100644
index 00000000..5dada3ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go
@@ -0,0 +1,52 @@
+package schema
+
+// ResourceImporter defines how a resource is imported in Terraform. This
+// can be set onto a Resource struct to make it Importable. Not all resources
+// have to be importable; if a Resource doesn't have a ResourceImporter then
+// it won't be importable.
+//
+// "Importing" in Terraform is the process of taking an already-created
+// resource and bringing it under Terraform management. This can include
+// updating Terraform state, generating Terraform configuration, etc.
+type ResourceImporter struct {
+ // The functions below must all be implemented for importing to work.
+
+ // State is called to convert an ID to one or more InstanceState to
+ // insert into the Terraform state. If this isn't specified, then
+ // the ID is passed straight through.
+ State StateFunc
+}
+
+// StateFunc is the function called to import a resource into the
+// Terraform state. It is given a ResourceData with only ID set. This
+// ID is going to be an arbitrary value given by the user and may not map
+// directly to the ID format that the resource expects, so that should
+// be validated.
+//
+// This should return a slice of ResourceData that turn into the state
+// that was imported. This might be as simple as returning only the argument
+// that was given to the function. In other cases (such as AWS security groups),
+// an import may fan out to multiple resources and this will have to return
+// multiple.
+//
+// To create the ResourceData structures for other resource types (if
+// you have to), instantiate your resource and call the Data function.
+type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error)
+
+// InternalValidate should be called to validate the structure of this
+// importer. This should be called in a unit test.
+//
+// Resource.InternalValidate() will automatically call this, so this doesn't
+// need to be called manually. Further, Resource.InternalValidate() is
+// automatically called by Provider.InternalValidate(), so you only need
+// to internal validate the provider.
+func (r *ResourceImporter) InternalValidate() error {
+ return nil
+}
+
+// ImportStatePassthrough is an implementation of StateFunc that can be
+// used to simply pass the ID directly through. This should be used only
+// in the case that an ID-only refresh is possible.
+func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) {
+ return []*ResourceData{d}, nil
+}
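+
+// Example (illustrative): enabling passthrough import on a resource whose ID
+// alone is sufficient for a full Read.
+//
+//	&Resource{
+//		// ...
+//		Importer: &ResourceImporter{
+//			State: ImportStatePassthrough,
+//		},
+//	}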
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
new file mode 100644
index 00000000..445819f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
@@ -0,0 +1,237 @@
+package schema
+
+import (
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/mitchellh/copystructure"
+)
+
+const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0"
+const TimeoutsConfigKey = "timeouts"
+
+const (
+ TimeoutCreate = "create"
+ TimeoutRead = "read"
+ TimeoutUpdate = "update"
+ TimeoutDelete = "delete"
+ TimeoutDefault = "default"
+)
+
+func timeoutKeys() []string {
+ return []string{
+ TimeoutCreate,
+ TimeoutRead,
+ TimeoutUpdate,
+ TimeoutDelete,
+ TimeoutDefault,
+ }
+}
+
+// DefaultTimeout converts a time.Duration, int64, or float64 into a
+// *time.Duration, logging a warning for any other type.
+func DefaultTimeout(tx interface{}) *time.Duration {
+ var td time.Duration
+ switch raw := tx.(type) {
+ case time.Duration:
+ return &raw
+ case int64:
+ td = time.Duration(raw)
+ case float64:
+ td = time.Duration(int64(raw))
+ default:
+ log.Printf("[WARN] Unknown type in DefaultTimeout: %#v", tx)
+ }
+ return &td
+}
+
+type ResourceTimeout struct {
+ Create, Read, Update, Delete, Default *time.Duration
+}
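+
+// Example (illustrative): declaring per-action timeouts on a Resource so that
+// ConfigDecode can accept user overrides for them.
+//
+//	Timeouts: &ResourceTimeout{
+//		Create: DefaultTimeout(40 * time.Minute),
+//		Delete: DefaultTimeout(30 * time.Minute),
+//	},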
+
+// ConfigDecode takes a resource and its configuration (available during Diff)
+// and validates and parses the timeouts into t.
+func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) error {
+ if s.Timeouts != nil {
+ raw, err := copystructure.Copy(s.Timeouts)
+ if err != nil {
+ log.Printf("[DEBUG] Error with deep copy: %s", err)
+ }
+ *t = *raw.(*ResourceTimeout)
+ }
+
+ if raw, ok := c.Config[TimeoutsConfigKey]; ok {
+ if configTimeouts, ok := raw.([]map[string]interface{}); ok {
+ for _, timeoutValues := range configTimeouts {
+ // loop through each Timeout given in the configuration and validate it
+ // against the Timeouts defined in the resource
+ for timeKey, timeValue := range timeoutValues {
+ // validate that we're dealing with the normal CRUD actions
+ var found bool
+ for _, key := range timeoutKeys() {
+ if timeKey == key {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey)
+ }
+
+ // Get timeout
+ rt, err := time.ParseDuration(timeValue.(string))
+ if err != nil {
+ return fmt.Errorf("Error parsing Timeout for (%s): %s", timeKey, err)
+ }
+
+ var timeout *time.Duration
+ switch timeKey {
+ case TimeoutCreate:
+ timeout = t.Create
+ case TimeoutUpdate:
+ timeout = t.Update
+ case TimeoutRead:
+ timeout = t.Read
+ case TimeoutDelete:
+ timeout = t.Delete
+ case TimeoutDefault:
+ timeout = t.Default
+ }
+
+ // If the resource has not declared this timeout in its definition, then
+ // error with an unsupported message
+ if timeout == nil {
+ return unsupportedTimeoutKeyError(timeKey)
+ }
+
+ *timeout = rt
+ }
+ }
+ } else {
+ log.Printf("[WARN] Invalid Timeout structure found, skipping timeouts")
+ }
+ }
+
+ return nil
+}
+
+func unsupportedTimeoutKeyError(key string) error {
+ return fmt.Errorf("Timeout Key (%s) is not supported", key)
+}
+
+// DiffEncode, StateEncode, and the metaEncode/metaDecode helpers below are
+// analogous to the Go stdlib JSON Encoder/Decoder pattern: they encode or
+// decode a timeouts struct to and from an instance diff, which is where the
+// timeout data is stored after a diff so it can be passed into Apply.
+//
+// StateEncode encodes the timeout into the ResourceData's InstanceState for
+// saving to state.
+func (t *ResourceTimeout) DiffEncode(id *terraform.InstanceDiff) error {
+ return t.metaEncode(id)
+}
+
+func (t *ResourceTimeout) StateEncode(is *terraform.InstanceState) error {
+ return t.metaEncode(is)
+}
+
+// metaEncode encodes the ResourceTimeout into a map[string]interface{} format
+// and stores it in the Meta field of the interface it's given.
+// Assumes the interface is either *terraform.InstanceState or
+// *terraform.InstanceDiff, returns an error otherwise
+func (t *ResourceTimeout) metaEncode(ids interface{}) error {
+ m := make(map[string]interface{})
+
+ if t.Create != nil {
+ m[TimeoutCreate] = t.Create.Nanoseconds()
+ }
+ if t.Read != nil {
+ m[TimeoutRead] = t.Read.Nanoseconds()
+ }
+ if t.Update != nil {
+ m[TimeoutUpdate] = t.Update.Nanoseconds()
+ }
+ if t.Delete != nil {
+ m[TimeoutDelete] = t.Delete.Nanoseconds()
+ }
+ if t.Default != nil {
+ m[TimeoutDefault] = t.Default.Nanoseconds()
+ // for any key above that is nil, if default is specified, we need to
+ // populate it with the default
+ for _, k := range timeoutKeys() {
+ if _, ok := m[k]; !ok {
+ m[k] = t.Default.Nanoseconds()
+ }
+ }
+ }
+
+ // only add the Timeout to the Meta if we have values
+ if len(m) > 0 {
+ switch instance := ids.(type) {
+ case *terraform.InstanceDiff:
+ if instance.Meta == nil {
+ instance.Meta = make(map[string]interface{})
+ }
+ instance.Meta[TimeoutKey] = m
+ case *terraform.InstanceState:
+ if instance.Meta == nil {
+ instance.Meta = make(map[string]interface{})
+ }
+ instance.Meta[TimeoutKey] = m
+ default:
+ return fmt.Errorf("Error matching type for Diff Encode")
+ }
+ }
+
+ return nil
+}
+
+func (t *ResourceTimeout) StateDecode(id *terraform.InstanceState) error {
+ return t.metaDecode(id)
+}
+func (t *ResourceTimeout) DiffDecode(is *terraform.InstanceDiff) error {
+ return t.metaDecode(is)
+}
+
+func (t *ResourceTimeout) metaDecode(ids interface{}) error {
+ var rawMeta interface{}
+ var ok bool
+ switch rawInstance := ids.(type) {
+ case *terraform.InstanceDiff:
+ rawMeta, ok = rawInstance.Meta[TimeoutKey]
+ if !ok {
+ return nil
+ }
+ case *terraform.InstanceState:
+ rawMeta, ok = rawInstance.Meta[TimeoutKey]
+ if !ok {
+ return nil
+ }
+ default:
+ return fmt.Errorf("Unknown or unsupported type in metaDecode: %#v", ids)
+ }
+
+ times := rawMeta.(map[string]interface{})
+ if len(times) == 0 {
+ return nil
+ }
+
+ if v, ok := times[TimeoutCreate]; ok {
+ t.Create = DefaultTimeout(v)
+ }
+ if v, ok := times[TimeoutRead]; ok {
+ t.Read = DefaultTimeout(v)
+ }
+ if v, ok := times[TimeoutUpdate]; ok {
+ t.Update = DefaultTimeout(v)
+ }
+ if v, ok := times[TimeoutDelete]; ok {
+ t.Delete = DefaultTimeout(v)
+ }
+ if v, ok := times[TimeoutDefault]; ok {
+ t.Default = DefaultTimeout(v)
+ }
+
+ return nil
+}
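+
+// Illustrative sketch (not part of the original source): round-tripping a
+// ResourceTimeout through an InstanceState using the encode/decode helpers
+// defined above.
+func exampleTimeoutRoundTrip() (*ResourceTimeout, error) {
+ rt := &ResourceTimeout{Create: DefaultTimeout(10 * time.Minute)}
+
+ is := &terraform.InstanceState{}
+ if err := rt.StateEncode(is); err != nil {
+ return nil, err
+ }
+
+ // is.Meta[TimeoutKey] now holds {"create": <nanoseconds>}; decode it
+ // back into a fresh struct.
+ decoded := &ResourceTimeout{}
+ if err := decoded.StateDecode(is); err != nil {
+ return nil, err
+ }
+ return decoded, nil
+}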
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
new file mode 100644
index 00000000..32d17213
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
@@ -0,0 +1,1537 @@
+// schema is a high-level framework for easily writing new providers
+// for Terraform. Usage of schema is recommended over attempting to write
+// to the low-level plugin interfaces manually.
+//
+// schema breaks down provider creation into simple CRUD operations for
+// resources. The logic of diffing, destroying before creating, updating
+// or creating, etc. is all handled by the framework. The plugin author
+// only needs to implement a configuration schema and the CRUD operations;
+// everything else is meant to just work.
+//
+// A good starting point is to view the Provider structure.
+package schema
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/mitchellh/mapstructure"
+)
+
+// type used for schema package context keys
+type contextKey string
+
+// Schema is used to describe the structure of a value.
+//
+// Read the documentation of the struct elements for important details.
+type Schema struct {
+ // Type is the type of the value and must be one of the ValueType values.
+ //
+ // This type not only determines what type is expected/valid in configuring
+ // this value, but also what type is returned when ResourceData.Get is
+ // called. The types returned by Get are:
+ //
+ // TypeBool - bool
+ // TypeInt - int
+ // TypeFloat - float64
+ // TypeString - string
+ // TypeList - []interface{}
+ // TypeMap - map[string]interface{}
+ // TypeSet - *schema.Set
+ //
+ Type ValueType
+
+ // If one of these is set, then this item can come from the configuration.
+ // Both cannot be set. If Optional is set, the value is optional. If
+ // Required is set, the value is required.
+ //
+ // One of these must be set if the value is not computed. That is,
+ // the value either comes from the config, is computed, or both.
+ Optional bool
+ Required bool
+
+ // If this is non-nil, the provided function will be used during diff
+ // of this field. If this is nil, a default diff for the type of the
+ // schema will be used.
+ //
+ // This allows comparison based on something other than primitive, list
+ // or map equality - for example SSH public keys may be considered
+ // equivalent regardless of trailing whitespace.
+ DiffSuppressFunc SchemaDiffSuppressFunc
+
+ // If this is non-nil, then this will be a default value that is used
+ // when this item is not set in the configuration.
+ //
+ // DefaultFunc can be specified to compute a dynamic default.
+ // Only one of Default or DefaultFunc can be set. If DefaultFunc is
+ // used then its return value should be stable to avoid generating
+ // confusing/perpetual diffs.
+ //
+ // Changing either Default or the return value of DefaultFunc can be
+ // a breaking change, especially if the attribute in question has
+ // ForceNew set. If a default needs to change to align with changing
+ // assumptions in an upstream API then it may be necessary to also use
+ // the MigrateState function on the resource to change the state to match,
+ // or have the Read function adjust the state value to align with the
+ // new default.
+ //
+ // If Required is true above, then Default cannot be set. DefaultFunc
+ // can be set with Required. If the DefaultFunc returns nil, then there
+ // will be no default and the user will be asked to fill it in.
+ //
+ // If either of these is set, then the user won't be asked for input
+ // for this key if the default is not nil.
+ Default interface{}
+ DefaultFunc SchemaDefaultFunc
+
+ // Description is used as the description for docs or asking for user
+ // input. It should be relatively short (a few sentences max) and should
+ // be formatted to fit a CLI.
+ Description string
+
+ // InputDefault is the default value to use for when inputs are requested.
+ // This differs from Default in that if Default is set, no input is
+ // asked for. If input is requested, this will be the default value offered.
+ InputDefault string
+
+ // The fields below relate to diffs.
+ //
+ // If Computed is true, then the result of this value is computed
+ // (unless specified by config) on creation.
+ //
+ // If ForceNew is true, then a change in this resource necessitates
+ // the creation of a new resource.
+ //
+ // StateFunc is a function called to change the value of this before
+ // storing it in the state (and likewise before comparing for diffs).
+ // A common use case is large strings: you may want to store only
+ // a hash of the string.
+ Computed bool
+ ForceNew bool
+ StateFunc SchemaStateFunc
+
+ // The following fields are only set for a TypeList or TypeSet Type.
+ //
+ // Elem must be either a *Schema or a *Resource if the Type is TypeList
+ // or TypeSet, and represents what the element type is. If it is *Schema,
+ // the element type is just a simple value. If it is *Resource, the
+ // element type is a complex structure, potentially with its own lifecycle.
+ //
+ // MaxItems defines the maximum number of items that can exist within a
+ // TypeSet or TypeList. A specific use case is a TypeSet that wraps a
+ // complex structure where more than one instance would cause instability.
+ //
+ // MinItems defines the minimum number of items that can exist within a
+ // TypeSet or TypeList. A specific use case is a TypeSet that wraps a
+ // complex structure where fewer than one instance would cause instability.
+ //
+ // PromoteSingle, if true, will allow single elements to be standalone
+ // and promote them to a list. For example "foo" would be promoted to
+ // ["foo"] automatically. This is primarily for legacy reasons and the
+ // ambiguity is not recommended for new usage. Promotion is only allowed
+ // for primitive element types.
+ Elem interface{}
+ MaxItems int
+ MinItems int
+ PromoteSingle bool
+
+ // The following fields are only valid for a TypeSet type.
+ //
+ // Set defines a function to determine the unique ID of an item so that
+ // a proper set can be built.
+ Set SchemaSetFunc
+
+ // ComputedWhen is a set of queries on the configuration. Whenever any
+ // of these things is changed, it will require a recompute (this requires
+ // that Computed is set to true).
+ //
+ // NOTE: This currently does not work.
+ ComputedWhen []string
+
+ // ConflictsWith is a set of schema keys that conflict with this schema.
+ // This will only check that they're set in the _config_. This will not
+ // raise an error for a malfunctioning resource that sets a conflicting
+ // key.
+ ConflictsWith []string
+
+ // When Deprecated is set, this attribute is deprecated.
+ //
+ // A deprecated field still works, but will probably stop working in the
+ // near future. This string is the message shown to the user with instructions on
+ // how to address the deprecation.
+ Deprecated string
+
+ // When Removed is set, this attribute has been removed from the schema
+ //
+ // Removed attributes can be left in the Schema to generate informative error
+ // messages for the user when they show up in resource configurations.
+ // This string is the message shown to the user with instructions on
+ // what to do about the removed attribute.
+ Removed string
+
+ // ValidateFunc allows individual fields to define arbitrary validation
+ // logic. It is yielded the provided config value as an interface{} that is
+ // guaranteed to be of the proper Schema type, and it can yield warnings or
+ // errors based on inspection of that value.
+ //
+ // ValidateFunc currently only works for primitive types.
+ ValidateFunc SchemaValidateFunc
+
+ // Sensitive ensures that the attribute's value does not get displayed in
+ // logs or regular output. It should be used for passwords or other
+ // secret fields. Future versions of Terraform may encrypt these
+ // values.
+ Sensitive bool
+}
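+
+// Illustrative sketch (not part of the original source): a minimal schema
+// map for a hypothetical resource, combining the fields documented above.
+func exampleSchemaMap() map[string]*Schema {
+ return map[string]*Schema{
+ "name": {
+ Type: TypeString,
+ Required: true,
+ ForceNew: true, // changing the name replaces the resource
+ },
+ "tags": {
+ Type: TypeMap,
+ Optional: true,
+ Elem: &Schema{Type: TypeString},
+ },
+ }
+}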
+
+// SchemaDiffSuppressFunc is a function which can be used to determine
+// whether a detected diff on a schema element is "valid" or not, and
+// suppress it from the plan if necessary.
+//
+// Return true if the diff should be suppressed, false to retain it.
+type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool
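+
+// Illustrative sketch (not part of the original source): a suppress
+// function that treats values as equal regardless of case, useful when an
+// upstream API normalizes identifiers.
+var suppressCaseDiff SchemaDiffSuppressFunc = func(k, old, new string, d *ResourceData) bool {
+ // Returning true suppresses the diff for this attribute.
+ return strings.EqualFold(old, new)
+}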
+
+// SchemaDefaultFunc is a function called to return a default value for
+// a field.
+type SchemaDefaultFunc func() (interface{}, error)
+
+// EnvDefaultFunc is a helper function that returns the value of the
+// given environment variable, if one exists, or the default value
+// otherwise.
+func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc {
+ return func() (interface{}, error) {
+ if v := os.Getenv(k); v != "" {
+ return v, nil
+ }
+
+ return dv, nil
+ }
+}
+
+// MultiEnvDefaultFunc is a helper function that returns the value of the first
+// environment variable in the given list that returns a non-empty value. If
+// none of the environment variables return a value, the default value is
+// returned.
+func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc {
+ return func() (interface{}, error) {
+ for _, k := range ks {
+ if v := os.Getenv(k); v != "" {
+ return v, nil
+ }
+ }
+ return dv, nil
+ }
+}
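+
+// Illustrative sketch (not part of the original source): a hypothetical
+// provider argument that falls back to environment variables. The attribute
+// and variable names are placeholders.
+func exampleAPIKeySchema() *Schema {
+ return &Schema{
+ Type: TypeString,
+ Optional: true,
+ // Checked in order; the first non-empty variable wins.
+ DefaultFunc: MultiEnvDefaultFunc([]string{
+ "EXAMPLE_API_KEY",
+ "EXAMPLE_TOKEN",
+ }, nil),
+ }
+}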
+
+// SchemaSetFunc is a function that must return a unique ID for the given
+// element. This unique ID is used to store the element in a hash.
+type SchemaSetFunc func(interface{}) int
+
+// SchemaStateFunc is a function used to convert some type to a string
+// to be stored in the state.
+type SchemaStateFunc func(interface{}) string
+
+// SchemaValidateFunc is a function used to validate a single field in the
+// schema.
+type SchemaValidateFunc func(interface{}, string) ([]string, []error)
+
+func (s *Schema) GoString() string {
+ return fmt.Sprintf("*%#v", *s)
+}
+
+// DefaultValue returns a default value for this schema by either reading
+// Default or evaluating DefaultFunc. If neither is defined, it returns nil.
+func (s *Schema) DefaultValue() (interface{}, error) {
+ if s.Default != nil {
+ return s.Default, nil
+ }
+
+ if s.DefaultFunc != nil {
+ defaultValue, err := s.DefaultFunc()
+ if err != nil {
+ return nil, fmt.Errorf("error loading default: %s", err)
+ }
+ return defaultValue, nil
+ }
+
+ return nil, nil
+}
+
+// ZeroValue returns the zero value for the schema's type.
+func (s *Schema) ZeroValue() interface{} {
+ // If it's a set then we'll do a bit of extra work to provide the
+ // right hashing function in our empty value.
+ if s.Type == TypeSet {
+ setFunc := s.Set
+ if setFunc == nil {
+ // Default set function uses the schema to hash the whole value
+ elem := s.Elem
+ switch t := elem.(type) {
+ case *Schema:
+ setFunc = HashSchema(t)
+ case *Resource:
+ setFunc = HashResource(t)
+ default:
+ panic("invalid set element type")
+ }
+ }
+ return &Set{F: setFunc}
+ } else {
+ return s.Type.Zero()
+ }
+}
+
+func (s *Schema) finalizeDiff(
+ d *terraform.ResourceAttrDiff) *terraform.ResourceAttrDiff {
+ if d == nil {
+ return d
+ }
+
+ if s.Type == TypeBool {
+ normalizeBoolString := func(s string) string {
+ switch s {
+ case "0":
+ return "false"
+ case "1":
+ return "true"
+ }
+ return s
+ }
+ d.Old = normalizeBoolString(d.Old)
+ d.New = normalizeBoolString(d.New)
+ }
+
+ if s.Computed && !d.NewRemoved && d.New == "" {
+ // Computed attribute without a new value set
+ d.NewComputed = true
+ }
+
+ if s.ForceNew {
+ // ForceNew, mark that this field is requiring new under the
+ // following conditions, explained below:
+ //
+ // * Old != New - There is a change in value. This field
+ // is therefore causing a new resource.
+ //
+ // * NewComputed - This field is being computed, hence a
+ // potential change in value, mark as causing a new resource.
+ d.RequiresNew = d.Old != d.New || d.NewComputed
+ }
+
+ if d.NewRemoved {
+ return d
+ }
+
+ if s.Computed {
+ if d.Old != "" && d.New == "" {
+ // This is a computed value with an old value set already,
+ // just let it go.
+ return nil
+ }
+
+ if d.New == "" {
+ // Computed attribute without a new value set
+ d.NewComputed = true
+ }
+ }
+
+ if s.Sensitive {
+ // Set the Sensitive flag so output is hidden in the UI
+ d.Sensitive = true
+ }
+
+ return d
+}
+
+// schemaMap is a wrapper that adds nice functions on top of schemas.
+type schemaMap map[string]*Schema
+
+// Data returns a ResourceData for the given schema, state, and diff.
+//
+// The diff is optional.
+func (m schemaMap) Data(
+ s *terraform.InstanceState,
+ d *terraform.InstanceDiff) (*ResourceData, error) {
+ return &ResourceData{
+ schema: m,
+ state: s,
+ diff: d,
+ }, nil
+}
+
+// Diff returns the diff for a resource given the schema map,
+// state, and configuration.
+func (m schemaMap) Diff(
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+ result := new(terraform.InstanceDiff)
+ result.Attributes = make(map[string]*terraform.ResourceAttrDiff)
+
+ // Make sure to mark if the resource is tainted
+ if s != nil {
+ result.DestroyTainted = s.Tainted
+ }
+
+ d := &ResourceData{
+ schema: m,
+ state: s,
+ config: c,
+ }
+
+ for k, schema := range m {
+ err := m.diff(k, schema, result, d, false)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // If the diff requires a new resource, then we recompute the diff
+ // so we have the complete new resource diff, and preserve the
+ // RequiresNew fields where necessary so the user knows exactly what
+ // caused that.
+ if result.RequiresNew() {
+ // Create the new diff
+ result2 := new(terraform.InstanceDiff)
+ result2.Attributes = make(map[string]*terraform.ResourceAttrDiff)
+
+ // Preserve the DestroyTainted flag
+ result2.DestroyTainted = result.DestroyTainted
+
+ // Reset the data to not contain state. We have to call init()
+ // again in order to reset the FieldReaders.
+ d.state = nil
+ d.init()
+
+ // Perform the diff again
+ for k, schema := range m {
+ err := m.diff(k, schema, result2, d, false)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Reset RequiresNew on every field in the new diff, since we already
+ // know exactly which fields we want to force new.
+ for k, attr := range result2.Attributes {
+ if attr == nil {
+ continue
+ }
+
+ if attr.RequiresNew {
+ attr.RequiresNew = false
+ }
+
+ if s != nil {
+ attr.Old = s.Attributes[k]
+ }
+ }
+
+ // Now copy in all the requires new diffs...
+ for k, attr := range result.Attributes {
+ if attr == nil {
+ continue
+ }
+
+ newAttr, ok := result2.Attributes[k]
+ if !ok {
+ newAttr = attr
+ }
+
+ if attr.RequiresNew {
+ newAttr.RequiresNew = true
+ }
+
+ result2.Attributes[k] = newAttr
+ }
+
+ // And set the diff!
+ result = result2
+ }
+
+ // Remove any nil diffs just to keep things clean
+ for k, v := range result.Attributes {
+ if v == nil {
+ delete(result.Attributes, k)
+ }
+ }
+
+ // Go through and detect all of the ComputedWhens now that we've
+ // finished the diff.
+ // TODO
+
+ if result.Empty() {
+ // If we don't have any diff elements, just return nil
+ return nil, nil
+ }
+
+ return result, nil
+}
+
+// Input implements the terraform.ResourceProvider method by asking
+// for input for required configuration keys that don't have a value.
+func (m schemaMap) Input(
+ input terraform.UIInput,
+ c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := m[k]
+
+ // Skip things that don't require config, if that is even valid
+ // for a provider schema.
+ // Required XOR Optional must always be true to validate, so we only
+ // need to check one.
+ if v.Optional {
+ continue
+ }
+
+ // Deprecated fields should never prompt
+ if v.Deprecated != "" {
+ continue
+ }
+
+ // Skip things that have a value of some sort already
+ if _, ok := c.Raw[k]; ok {
+ continue
+ }
+
+ // Skip if it has a default value
+ defaultValue, err := v.DefaultValue()
+ if err != nil {
+ return nil, fmt.Errorf("%s: error loading default: %s", k, err)
+ }
+ if defaultValue != nil {
+ continue
+ }
+
+ var value interface{}
+ switch v.Type {
+ case TypeBool, TypeInt, TypeFloat, TypeSet, TypeList:
+ continue
+ case TypeString:
+ value, err = m.inputString(input, k, v)
+ default:
+ panic(fmt.Sprintf("Unknown type for input: %#v", v.Type))
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf(
+ "%s: %s", k, err)
+ }
+
+ c.Config[k] = value
+ }
+
+ return c, nil
+}
+
+// Validate validates the configuration against this schema mapping.
+func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ return m.validateObject("", m, c)
+}
+
+// InternalValidate validates the format of this schema. This should be called
+// from a unit test (and not in user-path code) to verify that a schema
+// is properly built.
+func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error {
+ if topSchemaMap == nil {
+ topSchemaMap = m
+ }
+ for k, v := range m {
+ if v.Type == TypeInvalid {
+ return fmt.Errorf("%s: Type must be specified", k)
+ }
+
+ if v.Optional && v.Required {
+ return fmt.Errorf("%s: Optional or Required must be set, not both", k)
+ }
+
+ if v.Required && v.Computed {
+ return fmt.Errorf("%s: Cannot be both Required and Computed", k)
+ }
+
+ if !v.Required && !v.Optional && !v.Computed {
+ return fmt.Errorf("%s: One of optional, required, or computed must be set", k)
+ }
+
+ if v.Computed && v.Default != nil {
+ return fmt.Errorf("%s: Default must be nil if computed", k)
+ }
+
+ if v.Required && v.Default != nil {
+ return fmt.Errorf("%s: Default cannot be set with Required", k)
+ }
+
+ if len(v.ComputedWhen) > 0 && !v.Computed {
+ return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k)
+ }
+
+ if len(v.ConflictsWith) > 0 && v.Required {
+ return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k)
+ }
+
+ if len(v.ConflictsWith) > 0 {
+ for _, key := range v.ConflictsWith {
+ parts := strings.Split(key, ".")
+ sm := topSchemaMap
+ var target *Schema
+ for _, part := range parts {
+ // Skip index fields
+ if _, err := strconv.Atoi(part); err == nil {
+ continue
+ }
+
+ var ok bool
+ if target, ok = sm[part]; !ok {
+ return fmt.Errorf("%s: ConflictsWith references unknown attribute (%s)", k, key)
+ }
+
+ if subResource, ok := target.Elem.(*Resource); ok {
+ sm = schemaMap(subResource.Schema)
+ }
+ }
+ if target == nil {
+ return fmt.Errorf("%s: ConflictsWith cannot find target attribute (%s), sm: %#v", k, key, sm)
+ }
+ if target.Required {
+ return fmt.Errorf("%s: ConflictsWith cannot contain Required attribute (%s)", k, key)
+ }
+
+ if len(target.ComputedWhen) > 0 {
+ return fmt.Errorf("%s: ConflictsWith cannot contain Computed(When) attribute (%s)", k, key)
+ }
+ }
+ }
+
+ if v.Type == TypeList || v.Type == TypeSet {
+ if v.Elem == nil {
+ return fmt.Errorf("%s: Elem must be set for lists", k)
+ }
+
+ if v.Default != nil {
+ return fmt.Errorf("%s: Default is not valid for lists or sets", k)
+ }
+
+ if v.Type != TypeSet && v.Set != nil {
+ return fmt.Errorf("%s: Set can only be set for TypeSet", k)
+ }
+
+ switch t := v.Elem.(type) {
+ case *Resource:
+ if err := t.InternalValidate(topSchemaMap, true); err != nil {
+ return err
+ }
+ case *Schema:
+ bad := t.Computed || t.Optional || t.Required
+ if bad {
+ return fmt.Errorf(
+ "%s: Elem must have only Type set", k)
+ }
+ }
+ } else {
+ if v.MaxItems > 0 || v.MinItems > 0 {
+ return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k)
+ }
+ }
+
+ // Computed-only field
+ if v.Computed && !v.Optional {
+ if v.ValidateFunc != nil {
+ return fmt.Errorf("%s: ValidateFunc is for validating user input, "+
+ "there's nothing to validate on computed-only field", k)
+ }
+ if v.DiffSuppressFunc != nil {
+ return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+
+ " between config and state representation. "+
+ "There is no config for computed-only field, nothing to compare.", k)
+ }
+ }
+
+ if v.ValidateFunc != nil {
+ switch v.Type {
+ case TypeList, TypeSet:
+ return fmt.Errorf("ValidateFunc is not yet supported on lists or sets.")
+ }
+ }
+ }
+
+ return nil
+}
+
+func (m schemaMap) diff(
+ k string,
+ schema *Schema,
+ diff *terraform.InstanceDiff,
+ d *ResourceData,
+ all bool) error {
+
+ unsuppressedDiff := new(terraform.InstanceDiff)
+ unsuppressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff)
+
+ var err error
+ switch schema.Type {
+ case TypeBool, TypeInt, TypeFloat, TypeString:
+ err = m.diffString(k, schema, unsuppressedDiff, d, all)
+ case TypeList:
+ err = m.diffList(k, schema, unsuppressedDiff, d, all)
+ case TypeMap:
+ err = m.diffMap(k, schema, unsuppressedDiff, d, all)
+ case TypeSet:
+ err = m.diffSet(k, schema, unsuppressedDiff, d, all)
+ default:
+ err = fmt.Errorf("%s: unknown type %#v", k, schema.Type)
+ }
+
+ for attrK, attrV := range unsuppressedDiff.Attributes {
+ if schema.DiffSuppressFunc != nil &&
+ attrV != nil &&
+ schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, d) {
+ continue
+ }
+
+ diff.Attributes[attrK] = attrV
+ }
+
+ return err
+}
+
+func (m schemaMap) diffList(
+ k string,
+ schema *Schema,
+ diff *terraform.InstanceDiff,
+ d *ResourceData,
+ all bool) error {
+ o, n, _, computedList := d.diffChange(k)
+ if computedList {
+ n = nil
+ }
+ nSet := n != nil
+
+ // If we have an old value, no new value is set, the value isn't
+ // going to be computed once all variables can be interpolated, and
+ // the schema is computed, then nothing has changed.
+ if o != nil && n == nil && !computedList && schema.Computed {
+ return nil
+ }
+
+ if o == nil {
+ o = []interface{}{}
+ }
+ if n == nil {
+ n = []interface{}{}
+ }
+ if s, ok := o.(*Set); ok {
+ o = s.List()
+ }
+ if s, ok := n.(*Set); ok {
+ n = s.List()
+ }
+ os := o.([]interface{})
+ vs := n.([]interface{})
+
+ // If the new value was set, and the two are equal, then we're done.
+ // We have to do this check here because sets might not be
+ // reflect.DeepEqual, so we need to wait until we have the []interface{}
+ if !all && nSet && reflect.DeepEqual(os, vs) {
+ return nil
+ }
+
+ // Get the counts
+ oldLen := len(os)
+ newLen := len(vs)
+ oldStr := strconv.FormatInt(int64(oldLen), 10)
+
+ // If the whole list is computed, then say that the # is computed
+ if computedList {
+ diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{
+ Old: oldStr,
+ NewComputed: true,
+ RequiresNew: schema.ForceNew,
+ }
+ return nil
+ }
+
+ // If the counts are not the same, then record that diff
+ changed := oldLen != newLen
+ computed := oldLen == 0 && newLen == 0 && schema.Computed
+ if changed || computed || all {
+ countSchema := &Schema{
+ Type: TypeInt,
+ Computed: schema.Computed,
+ ForceNew: schema.ForceNew,
+ }
+
+ newStr := ""
+ if !computed {
+ newStr = strconv.FormatInt(int64(newLen), 10)
+ } else {
+ oldStr = ""
+ }
+
+ diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: oldStr,
+ New: newStr,
+ })
+ }
+
+ // Figure out the maximum
+ maxLen := oldLen
+ if newLen > maxLen {
+ maxLen = newLen
+ }
+
+ switch t := schema.Elem.(type) {
+ case *Resource:
+ // This is a complex resource
+ for i := 0; i < maxLen; i++ {
+ for k2, schema := range t.Schema {
+ subK := fmt.Sprintf("%s.%d.%s", k, i, k2)
+ err := m.diff(subK, schema, diff, d, all)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ case *Schema:
+ // Copy the schema so that we can set Computed/ForceNew from
+ // the parent schema (the TypeList).
+ t2 := *t
+ t2.ForceNew = schema.ForceNew
+
+ // This is just a primitive element, so go through each and
+ // just diff each.
+ for i := 0; i < maxLen; i++ {
+ subK := fmt.Sprintf("%s.%d", k, i)
+ err := m.diff(subK, &t2, diff, d, all)
+ if err != nil {
+ return err
+ }
+ }
+ default:
+ return fmt.Errorf("%s: unknown element type (internal)", k)
+ }
+
+ return nil
+}
+
+func (m schemaMap) diffMap(
+ k string,
+ schema *Schema,
+ diff *terraform.InstanceDiff,
+ d *ResourceData,
+ all bool) error {
+ prefix := k + "."
+
+ // First get all the values from the state
+ var stateMap, configMap map[string]string
+ o, n, _, nComputed := d.diffChange(k)
+ if err := mapstructure.WeakDecode(o, &stateMap); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+ if err := mapstructure.WeakDecode(n, &configMap); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+
+ // Keep track of whether the state _exists_ at all prior to clearing it
+ stateExists := o != nil
+
+ // Delete any count values, since we don't use those
+ delete(configMap, "%")
+ delete(stateMap, "%")
+
+ // Check if the number of elements has changed.
+ oldLen, newLen := len(stateMap), len(configMap)
+ changed := oldLen != newLen
+ if oldLen != 0 && newLen == 0 && schema.Computed {
+ changed = false
+ }
+
+ // It is computed if we have no old value, no new value, the schema
+ // says it is computed, and it didn't exist in the state before. The
+ // last point means: if it existed in the state, even empty, then it
+ // has already been computed.
+ computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists
+
+ // If the count has changed or we're computed, then add a diff for the
+ // count. "nComputed" means that the new value _contains_ a value that
+ // is computed. We don't do granular diffs for this yet, so we mark the
+ // whole map as computed.
+ if changed || computed || nComputed {
+ countSchema := &Schema{
+ Type: TypeInt,
+ Computed: schema.Computed || nComputed,
+ ForceNew: schema.ForceNew,
+ }
+
+ oldStr := strconv.FormatInt(int64(oldLen), 10)
+ newStr := ""
+ if !computed && !nComputed {
+ newStr = strconv.FormatInt(int64(newLen), 10)
+ } else {
+ oldStr = ""
+ }
+
+ diff.Attributes[k+".%"] = countSchema.finalizeDiff(
+ &terraform.ResourceAttrDiff{
+ Old: oldStr,
+ New: newStr,
+ },
+ )
+ }
+
+ // If the new map is nil and we're computed, then ignore it.
+ if n == nil && schema.Computed {
+ return nil
+ }
+
+ // Now we compare, preferring values from the config map
+ for k, v := range configMap {
+ old, ok := stateMap[k]
+ delete(stateMap, k)
+
+ if old == v && ok && !all {
+ continue
+ }
+
+ diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: old,
+ New: v,
+ })
+ }
+ for k, v := range stateMap {
+ diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: v,
+ NewRemoved: true,
+ })
+ }
+
+ return nil
+}
+
+func (m schemaMap) diffSet(
+ k string,
+ schema *Schema,
+ diff *terraform.InstanceDiff,
+ d *ResourceData,
+ all bool) error {
+
+ o, n, _, computedSet := d.diffChange(k)
+ if computedSet {
+ n = nil
+ }
+ nSet := n != nil
+
+ // If we have an old value, no new value is set, the value isn't
+ // going to be computed once all variables can be interpolated, and
+ // the schema is computed, then nothing has changed.
+ if o != nil && n == nil && !computedSet && schema.Computed {
+ return nil
+ }
+
+ if o == nil {
+ o = schema.ZeroValue().(*Set)
+ }
+ if n == nil {
+ n = schema.ZeroValue().(*Set)
+ }
+ os := o.(*Set)
+ ns := n.(*Set)
+
+ // If the new value was set, compare the hash codes (listCode) to
+ // determine if the two sets are equal. Comparing hash codes instead
+ // of the actual values is needed because computed values in the set
+ // could otherwise produce false positives.
+ if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) {
+ return nil
+ }
+
+ // Get the counts
+ oldLen := os.Len()
+ newLen := ns.Len()
+ oldStr := strconv.Itoa(oldLen)
+ newStr := strconv.Itoa(newLen)
+
+ // Build a schema for our count
+ countSchema := &Schema{
+ Type: TypeInt,
+ Computed: schema.Computed,
+ ForceNew: schema.ForceNew,
+ }
+
+ // If the set is computed then say that the # is computed
+ if computedSet || schema.Computed && !nSet {
+ // If # already exists, equals 0 and no new set is supplied, there
+ // is nothing to record in the diff
+ count, ok := d.GetOk(k + ".#")
+ if ok && count.(int) == 0 && !nSet && !computedSet {
+ return nil
+ }
+
+ // Set the count but make sure that if # does not exist, we don't
+ // use the zeroed value
+ countStr := strconv.Itoa(count.(int))
+ if !ok {
+ countStr = ""
+ }
+
+ diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: countStr,
+ NewComputed: true,
+ })
+ return nil
+ }
+
+ // If the counts are not the same, then record that diff
+ changed := oldLen != newLen
+ if changed || all {
+ diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: oldStr,
+ New: newStr,
+ })
+ }
+
+ // Build the list of codes that will make up our set. This is the
+ // removed codes as well as all the codes in the new codes.
+ codes := make([][]string, 2)
+ codes[0] = os.Difference(ns).listCode()
+ codes[1] = ns.listCode()
+ for _, list := range codes {
+ for _, code := range list {
+ switch t := schema.Elem.(type) {
+ case *Resource:
+ // This is a complex resource
+ for k2, schema := range t.Schema {
+ subK := fmt.Sprintf("%s.%s.%s", k, code, k2)
+ err := m.diff(subK, schema, diff, d, true)
+ if err != nil {
+ return err
+ }
+ }
+ case *Schema:
+ // Copy the schema so that we can set Computed/ForceNew from
+ // the parent schema (the TypeSet).
+ t2 := *t
+ t2.ForceNew = schema.ForceNew
+
+ // This is just a primitive element, so go through each and
+ // just diff each.
+ subK := fmt.Sprintf("%s.%s", k, code)
+ err := m.diff(subK, &t2, diff, d, true)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("%s: unknown element type (internal)", k)
+ }
+ }
+ }
+
+ return nil
+}
+
+func (m schemaMap) diffString(
+ k string,
+ schema *Schema,
+ diff *terraform.InstanceDiff,
+ d *ResourceData,
+ all bool) error {
+ var originalN interface{}
+ var os, ns string
+ o, n, _, computed := d.diffChange(k)
+ if schema.StateFunc != nil && n != nil {
+ originalN = n
+ n = schema.StateFunc(n)
+ }
+ nraw := n
+ if nraw == nil && o != nil {
+ nraw = schema.Type.Zero()
+ }
+ if err := mapstructure.WeakDecode(o, &os); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+ if err := mapstructure.WeakDecode(nraw, &ns); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+
+ if os == ns && !all {
+ // They're the same value. If the old value is not blank or we
+ // have an ID, then return right away since we're already set up.
+ if os != "" || d.Id() != "" {
+ return nil
+ }
+
+ // Otherwise, only continue if we're computed
+ if !schema.Computed && !computed {
+ return nil
+ }
+ }
+
+ removed := false
+ if o != nil && n == nil {
+ removed = true
+ }
+ if removed && schema.Computed {
+ return nil
+ }
+
+ diff.Attributes[k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: os,
+ New: ns,
+ NewExtra: originalN,
+ NewRemoved: removed,
+ NewComputed: computed,
+ })
+
+ return nil
+}
+
+func (m schemaMap) inputString(
+ input terraform.UIInput,
+ k string,
+ schema *Schema) (interface{}, error) {
+ result, err := input.Input(&terraform.InputOpts{
+ Id: k,
+ Query: k,
+ Description: schema.Description,
+ Default: schema.InputDefault,
+ })
+
+ return result, err
+}
+
+func (m schemaMap) validate(
+ k string,
+ schema *Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+ raw, ok := c.Get(k)
+ if !ok && schema.DefaultFunc != nil {
+ // We have a dynamic default. Check if we have a value.
+ var err error
+ raw, err = schema.DefaultFunc()
+ if err != nil {
+ return nil, []error{fmt.Errorf(
+ "%q, error loading default: %s", k, err)}
+ }
+
+ // We're okay as long as we had a value set
+ ok = raw != nil
+ }
+ if !ok {
+ if schema.Required {
+ return nil, []error{fmt.Errorf(
+ "%q: required field is not set", k)}
+ }
+
+ return nil, nil
+ }
+
+ if !schema.Required && !schema.Optional {
+ // This is a computed-only field
+ return nil, []error{fmt.Errorf(
+ "%q: this field cannot be set", k)}
+ }
+
+ err := m.validateConflictingAttributes(k, schema, c)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ return m.validateType(k, raw, schema, c)
+}
+
+func (m schemaMap) validateConflictingAttributes(
+ k string,
+ schema *Schema,
+ c *terraform.ResourceConfig) error {
+
+ if len(schema.ConflictsWith) == 0 {
+ return nil
+ }
+
+ for _, conflicting_key := range schema.ConflictsWith {
+ if value, ok := c.Get(conflicting_key); ok {
+ return fmt.Errorf(
+ "%q: conflicts with %s (%#v)", k, conflicting_key, value)
+ }
+ }
+
+ return nil
+}
+
+func (m schemaMap) validateList(
+ k string,
+ raw interface{},
+ schema *Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+ // We use reflection to verify the slice because you can't
+ // cast to []interface{} unless the slice is exactly that type.
+ rawV := reflect.ValueOf(raw)
+
+ // If we support promotion and the raw value isn't a slice, wrap
+ // it in []interface{} and check again.
+ if schema.PromoteSingle && rawV.Kind() != reflect.Slice {
+ raw = []interface{}{raw}
+ rawV = reflect.ValueOf(raw)
+ }
+
+ if rawV.Kind() != reflect.Slice {
+ return nil, []error{fmt.Errorf(
+ "%s: should be a list", k)}
+ }
+
+ // Validate length
+ if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems {
+ return nil, []error{fmt.Errorf(
+ "%s: attribute supports %d item maximum, config has %d declared", k, schema.MaxItems, rawV.Len())}
+ }
+
+ if schema.MinItems > 0 && rawV.Len() < schema.MinItems {
+ return nil, []error{fmt.Errorf(
+ "%s: attribute supports %d item as a minimum, config has %d declared", k, schema.MinItems, rawV.Len())}
+ }
+
+ // Now build the []interface{}
+ raws := make([]interface{}, rawV.Len())
+ for i := range raws {
+ raws[i] = rawV.Index(i).Interface()
+ }
+
+ var ws []string
+ var es []error
+ for i, raw := range raws {
+ key := fmt.Sprintf("%s.%d", k, i)
+
+ // Reify the key value from the ResourceConfig.
+ // If the list was computed we have all raw values, but some of these
+ // may be known in the config, and aren't individually marked as Computed.
+ if r, ok := c.Get(key); ok {
+ raw = r
+ }
+
+ var ws2 []string
+ var es2 []error
+ switch t := schema.Elem.(type) {
+ case *Resource:
+ // This is a sub-resource
+ ws2, es2 = m.validateObject(key, t.Schema, c)
+ case *Schema:
+ ws2, es2 = m.validateType(key, raw, t, c)
+ }
+
+ if len(ws2) > 0 {
+ ws = append(ws, ws2...)
+ }
+ if len(es2) > 0 {
+ es = append(es, es2...)
+ }
+ }
+
+ return ws, es
+}
+
+func (m schemaMap) validateMap(
+ k string,
+ raw interface{},
+ schema *Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+ // We use reflection to verify the type because you can't
+ // cast to map[string]interface{} unless it is exactly that type.
+ rawV := reflect.ValueOf(raw)
+ switch rawV.Kind() {
+ case reflect.String:
+ // If raw and reified are equal, this is a string and should
+ // be rejected.
+ reified, reifiedOk := c.Get(k)
+ if reifiedOk && raw == reified && !c.IsComputed(k) {
+ return nil, []error{fmt.Errorf("%s: should be a map", k)}
+ }
+ // Otherwise it's likely raw is an interpolation.
+ return nil, nil
+ case reflect.Map:
+ case reflect.Slice:
+ default:
+ return nil, []error{fmt.Errorf("%s: should be a map", k)}
+ }
+
+ // If it is not a slice, validate directly
+ if rawV.Kind() != reflect.Slice {
+ mapIface := rawV.Interface()
+ if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 {
+ return nil, errs
+ }
+ if schema.ValidateFunc != nil {
+ return schema.ValidateFunc(mapIface, k)
+ }
+ return nil, nil
+ }
+
+ // It is a slice, verify that all the elements are maps
+ raws := make([]interface{}, rawV.Len())
+ for i := range raws {
+ raws[i] = rawV.Index(i).Interface()
+ }
+
+ for _, raw := range raws {
+ v := reflect.ValueOf(raw)
+ if v.Kind() != reflect.Map {
+ return nil, []error{fmt.Errorf(
+ "%s: should be a map", k)}
+ }
+ mapIface := v.Interface()
+ if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 {
+ return nil, errs
+ }
+ }
+
+ if schema.ValidateFunc != nil {
+ validatableMap := make(map[string]interface{})
+ for _, raw := range raws {
+ for k, v := range raw.(map[string]interface{}) {
+ validatableMap[k] = v
+ }
+ }
+
+ return schema.ValidateFunc(validatableMap, k)
+ }
+
+ return nil, nil
+}
+
+func validateMapValues(k string, m map[string]interface{}, schema *Schema) ([]string, []error) {
+ for key, raw := range m {
+ valueType, err := getValueType(k, schema)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ switch valueType {
+ case TypeBool:
+ var n bool
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
+ }
+ case TypeInt:
+ var n int
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
+ }
+ case TypeFloat:
+ var n float64
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
+ }
+ case TypeString:
+ var n string
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
+ }
+ default:
+ panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type))
+ }
+ }
+ return nil, nil
+}
+
+func getValueType(k string, schema *Schema) (ValueType, error) {
+ if schema.Elem == nil {
+ return TypeString, nil
+ }
+ if vt, ok := schema.Elem.(ValueType); ok {
+ return vt, nil
+ }
+
+ if s, ok := schema.Elem.(*Schema); ok {
+ if s.Elem == nil {
+ return TypeString, nil
+ }
+ if vt, ok := s.Elem.(ValueType); ok {
+ return vt, nil
+ }
+ }
+
+ if _, ok := schema.Elem.(*Resource); ok {
+ // TODO: We don't actually support this (yet)
+ // but silently pass the validation, until we decide
+ // how to handle nested structures in maps
+ return TypeString, nil
+ }
+ return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem)
+}
+
+func (m schemaMap) validateObject(
+ k string,
+ schema map[string]*Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+ raw, _ := c.GetRaw(k)
+ if _, ok := raw.(map[string]interface{}); !ok {
+ return nil, []error{fmt.Errorf(
+ "%s: expected object, got %s",
+ k, reflect.ValueOf(raw).Kind())}
+ }
+
+ var ws []string
+ var es []error
+ for subK, s := range schema {
+ key := subK
+ if k != "" {
+ key = fmt.Sprintf("%s.%s", k, subK)
+ }
+
+ ws2, es2 := m.validate(key, s, c)
+ if len(ws2) > 0 {
+ ws = append(ws, ws2...)
+ }
+ if len(es2) > 0 {
+ es = append(es, es2...)
+ }
+ }
+
+ // Detect any extra/unknown keys and report those as errors.
+ if m, ok := raw.(map[string]interface{}); ok {
+ for subk := range m {
+ if _, ok := schema[subk]; !ok {
+ if subk == TimeoutsConfigKey {
+ continue
+ }
+ es = append(es, fmt.Errorf(
+ "%s: invalid or unknown key: %s", k, subk))
+ }
+ }
+ }
+
+ return ws, es
+}
+
+func (m schemaMap) validatePrimitive(
+ k string,
+ raw interface{},
+ schema *Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+
+ // Catch if the user gave a complex type where a primitive was
+ // expected, so we can return a friendly error message that
+ // doesn't contain Go type system terminology.
+ switch reflect.ValueOf(raw).Type().Kind() {
+ case reflect.Slice:
+ return nil, []error{
+ fmt.Errorf("%s must be a single value, not a list", k),
+ }
+ case reflect.Map:
+ return nil, []error{
+ fmt.Errorf("%s must be a single value, not a map", k),
+ }
+ default: // ok
+ }
+
+ if c.IsComputed(k) {
+ // If the key is being computed, then it is not an error as
+ // long as it's not a slice or map.
+ return nil, nil
+ }
+
+ var decoded interface{}
+ switch schema.Type {
+ case TypeBool:
+ // Verify that we can parse this as the correct type
+ var n bool
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s: %s", k, err)}
+ }
+ decoded = n
+ case TypeInt:
+ // Verify that we can parse this as an int
+ var n int
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s: %s", k, err)}
+ }
+ decoded = n
+ case TypeFloat:
+ // Verify that we can parse this as a float64
+ var n float64
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s: %s", k, err)}
+ }
+ decoded = n
+ case TypeString:
+ // Verify that we can parse this as a string
+ var n string
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s: %s", k, err)}
+ }
+ decoded = n
+ default:
+ panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type))
+ }
+
+ if schema.ValidateFunc != nil {
+ return schema.ValidateFunc(decoded, k)
+ }
+
+ return nil, nil
+}
+
+func (m schemaMap) validateType(
+ k string,
+ raw interface{},
+ schema *Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+ var ws []string
+ var es []error
+ switch schema.Type {
+ case TypeSet, TypeList:
+ ws, es = m.validateList(k, raw, schema, c)
+ case TypeMap:
+ ws, es = m.validateMap(k, raw, schema, c)
+ default:
+ ws, es = m.validatePrimitive(k, raw, schema, c)
+ }
+
+ if schema.Deprecated != "" {
+ ws = append(ws, fmt.Sprintf(
+ "%q: [DEPRECATED] %s", k, schema.Deprecated))
+ }
+
+ if schema.Removed != "" {
+ es = append(es, fmt.Errorf(
+ "%q: [REMOVED] %s", k, schema.Removed))
+ }
+
+ return ws, es
+}
+
+// Zero returns the zero value for a type.
+func (t ValueType) Zero() interface{} {
+ switch t {
+ case TypeInvalid:
+ return nil
+ case TypeBool:
+ return false
+ case TypeInt:
+ return 0
+ case TypeFloat:
+ return 0.0
+ case TypeString:
+ return ""
+ case TypeList:
+ return []interface{}{}
+ case TypeMap:
+ return map[string]interface{}{}
+ case TypeSet:
+ return new(Set)
+ case typeObject:
+ return map[string]interface{}{}
+ default:
+ panic(fmt.Sprintf("unknown type %s", t))
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go b/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go
new file mode 100644
index 00000000..3eb2d007
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go
@@ -0,0 +1,122 @@
+package schema
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strconv"
+)
+
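+// SerializeValueForHash appends a deterministic serialization of val to buf,
+// driven by the given schema, for use when hashing set elements. Like
+// SerializeResourceForHash below, the output is not reversible.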
+func SerializeValueForHash(buf *bytes.Buffer, val interface{}, schema *Schema) {
+ if val == nil {
+ buf.WriteRune(';')
+ return
+ }
+
+ switch schema.Type {
+ case TypeBool:
+ if val.(bool) {
+ buf.WriteRune('1')
+ } else {
+ buf.WriteRune('0')
+ }
+ case TypeInt:
+ buf.WriteString(strconv.Itoa(val.(int)))
+ case TypeFloat:
+ buf.WriteString(strconv.FormatFloat(val.(float64), 'g', -1, 64))
+ case TypeString:
+ buf.WriteString(val.(string))
+ case TypeList:
+ buf.WriteRune('(')
+ l := val.([]interface{})
+ for _, innerVal := range l {
+ serializeCollectionMemberForHash(buf, innerVal, schema.Elem)
+ }
+ buf.WriteRune(')')
+ case TypeMap:
+
+ m := val.(map[string]interface{})
+ var keys []string
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ buf.WriteRune('[')
+ for _, k := range keys {
+ innerVal := m[k]
+ if innerVal == nil {
+ continue
+ }
+ buf.WriteString(k)
+ buf.WriteRune(':')
+
+ switch innerVal := innerVal.(type) {
+ case int:
+ buf.WriteString(strconv.Itoa(innerVal))
+ case float64:
+ buf.WriteString(strconv.FormatFloat(innerVal, 'g', -1, 64))
+ case string:
+ buf.WriteString(innerVal)
+ default:
+ panic(fmt.Sprintf("unknown value type in TypeMap %T", innerVal))
+ }
+
+ buf.WriteRune(';')
+ }
+ buf.WriteRune(']')
+ case TypeSet:
+ buf.WriteRune('{')
+ s := val.(*Set)
+ for _, innerVal := range s.List() {
+ serializeCollectionMemberForHash(buf, innerVal, schema.Elem)
+ }
+ buf.WriteRune('}')
+ default:
+ panic("unknown schema type to serialize")
+ }
+ buf.WriteRune(';')
+}
+
+// SerializeResourceForHash appends a serialization of the given resource
+// config to the given buffer, guaranteeing deterministic results given the
+// same value and schema.
+//
+// Its primary purpose is as input into a hashing function in order
+// to hash complex substructures when used in sets, and so the serialization
+// is not reversible.
+func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Resource) {
+ sm := resource.Schema
+ m := val.(map[string]interface{})
+ var keys []string
+ for k := range sm {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ innerSchema := sm[k]
+ // Skip attributes that are not user-provided. Computed attributes
+ // do not contribute to the hash since their ultimate value cannot
+ // be known at plan/diff time.
+ if !(innerSchema.Required || innerSchema.Optional) {
+ continue
+ }
+
+ buf.WriteString(k)
+ buf.WriteRune(':')
+ innerVal := m[k]
+ SerializeValueForHash(buf, innerVal, innerSchema)
+ }
+}
+
+func serializeCollectionMemberForHash(buf *bytes.Buffer, val interface{}, elem interface{}) {
+ switch tElem := elem.(type) {
+ case *Schema:
+ SerializeValueForHash(buf, val, tElem)
+ case *Resource:
+ buf.WriteRune('<')
+ SerializeResourceForHash(buf, val, tElem)
+ buf.WriteString(">;")
+ default:
+ panic(fmt.Sprintf("invalid element type: %T", tElem))
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/set.go b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
new file mode 100644
index 00000000..de05f40e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
@@ -0,0 +1,209 @@
+package schema
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+
+ "github.com/hashicorp/terraform/helper/hashcode"
+)
+
+// HashString hashes strings. If you want a Set of strings, this is the
+// SchemaSetFunc you want.
+func HashString(v interface{}) int {
+ return hashcode.String(v.(string))
+}
+
+// HashResource hashes complex structures that are described using
+// a *Resource. This is the default set implementation used when a set's
+// element type is a full resource.
+func HashResource(resource *Resource) SchemaSetFunc {
+ return func(v interface{}) int {
+ var buf bytes.Buffer
+ SerializeResourceForHash(&buf, v, resource)
+ return hashcode.String(buf.String())
+ }
+}
+
+// HashSchema hashes values that are described using a *Schema. This is the
+// default set implementation used when a set's element type is a single
+// schema.
+func HashSchema(schema *Schema) SchemaSetFunc {
+ return func(v interface{}) int {
+ var buf bytes.Buffer
+ SerializeValueForHash(&buf, v, schema)
+ return hashcode.String(buf.String())
+ }
+}
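+
+// Illustrative sketch (not part of the original source): HashResource
+// deriving a stable hash code for a complex set element. The "endpoint"
+// resource below is a hypothetical schema used only for demonstration.
+func exampleHashResource() int {
+ endpoint := &Resource{
+ Schema: map[string]*Schema{
+ "host": {Type: TypeString, Required: true},
+ "port": {Type: TypeInt, Optional: true},
+ },
+ }
+ f := HashResource(endpoint)
+ // Equal values always hash to the same code, so sets can detect
+ // duplicates regardless of insertion order.
+ return f(map[string]interface{}{"host": "example.com", "port": 443})
+}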
+
+// Set is a set data structure that is returned for elements of type
+// TypeSet.
+type Set struct {
+ F SchemaSetFunc
+
+ m map[string]interface{}
+ once sync.Once
+}
+
+// NewSet is a convenience method for creating a new set with the given
+// items.
+func NewSet(f SchemaSetFunc, items []interface{}) *Set {
+ s := &Set{F: f}
+ for _, i := range items {
+ s.Add(i)
+ }
+
+ return s
+}
+
+// CopySet returns a copy of another set.
+func CopySet(otherSet *Set) *Set {
+ return NewSet(otherSet.F, otherSet.List())
+}
+
+// Add adds an item to the set if it isn't already in the set.
+func (s *Set) Add(item interface{}) {
+ s.add(item, false)
+}
+
+// Remove removes an item if it's already in the set. Idempotent.
+func (s *Set) Remove(item interface{}) {
+ s.remove(item)
+}
+
+// Contains checks if the set has the given item.
+func (s *Set) Contains(item interface{}) bool {
+ _, ok := s.m[s.hash(item)]
+ return ok
+}
+
+// Len returns the number of items in the set.
+func (s *Set) Len() int {
+ return len(s.m)
+}
+
+// List returns the elements of this set in slice format.
+//
+// The order of the returned elements is deterministic: given the same
+// set, the order will always be the same.
+func (s *Set) List() []interface{} {
+ result := make([]interface{}, len(s.m))
+ for i, k := range s.listCode() {
+ result[i] = s.m[k]
+ }
+
+ return result
+}
+
+// Difference performs a set difference of the two sets, returning
+// a new third set that has only the elements unique to this set.
+func (s *Set) Difference(other *Set) *Set {
+ result := &Set{F: s.F}
+ result.once.Do(result.init)
+
+ for k, v := range s.m {
+ if _, ok := other.m[k]; !ok {
+ result.m[k] = v
+ }
+ }
+
+ return result
+}
+
+// Intersection performs the set intersection of the two sets
+// and returns a new third set.
+func (s *Set) Intersection(other *Set) *Set {
+ result := &Set{F: s.F}
+ result.once.Do(result.init)
+
+ for k, v := range s.m {
+ if _, ok := other.m[k]; ok {
+ result.m[k] = v
+ }
+ }
+
+ return result
+}
+
+// Union performs the set union of the two sets and returns a new third
+// set.
+func (s *Set) Union(other *Set) *Set {
+ result := &Set{F: s.F}
+ result.once.Do(result.init)
+
+ for k, v := range s.m {
+ result.m[k] = v
+ }
+ for k, v := range other.m {
+ result.m[k] = v
+ }
+
+ return result
+}
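+
+// Illustrative sketch (not part of the original source) of typical Set
+// usage: construction with HashString, membership tests, and the
+// Difference/Intersection/Union operations defined above.
+func exampleSetOps() []interface{} {
+ a := NewSet(HashString, []interface{}{"alpha", "beta"})
+ b := NewSet(HashString, []interface{}{"beta", "gamma"})
+
+ _ = a.Contains("alpha") // true
+ onlyA := a.Difference(b) // "alpha"
+ both := a.Intersection(b) // "beta"
+ all := a.Union(b) // "alpha", "beta", "gamma"
+ _, _ = onlyA, both
+
+ // List returns the elements in a deterministic (hash code) order.
+ return all.List()
+}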
+
+func (s *Set) Equal(raw interface{}) bool {
+ other, ok := raw.(*Set)
+ if !ok {
+ return false
+ }
+
+ return reflect.DeepEqual(s.m, other.m)
+}
+
+func (s *Set) GoString() string {
+ return fmt.Sprintf("*Set(%#v)", s.m)
+}
+
+func (s *Set) init() {
+ s.m = make(map[string]interface{})
+}
+
+func (s *Set) add(item interface{}, computed bool) string {
+ s.once.Do(s.init)
+
+ code := s.hash(item)
+ if computed {
+ code = "~" + code
+ }
+
+ if _, ok := s.m[code]; !ok {
+ s.m[code] = item
+ }
+
+ return code
+}
+
+func (s *Set) hash(item interface{}) string {
+ code := s.F(item)
+ // Always return a nonnegative hashcode.
+ if code < 0 {
+ code = -code
+ }
+ return strconv.Itoa(code)
+}
+
+func (s *Set) remove(item interface{}) string {
+ s.once.Do(s.init)
+
+ code := s.hash(item)
+ delete(s.m, code)
+
+ return code
+}
+
+func (s *Set) index(item interface{}) int {
+ return sort.SearchStrings(s.listCode(), s.hash(item))
+}
+
+func (s *Set) listCode() []string {
+ // Sort the hash codes so the order of the list is deterministic
+ keys := make([]string, 0, len(s.m))
+ for k := range s.m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
new file mode 100644
index 00000000..9765bdbc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
@@ -0,0 +1,30 @@
+package schema
+
+import (
+ "testing"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// TestResourceDataRaw creates a ResourceData from a raw configuration map.
+func TestResourceDataRaw(
+ t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData {
+ c, err := config.NewRawConfig(raw)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ sm := schemaMap(schema)
+ diff, err := sm.Diff(nil, terraform.NewResourceConfig(c))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ result, err := sm.Data(nil, diff)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ return result
+}
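+
+// Illustrative sketch (not part of the original source): a provider unit
+// test using TestResourceDataRaw to build a ResourceData from raw config
+// without a full plan/apply cycle. The schema below is hypothetical.
+func exampleTestResourceData(t *testing.T) {
+ s := map[string]*Schema{
+ "name": {Type: TypeString, Required: true},
+ }
+ d := TestResourceDataRaw(t, s, map[string]interface{}{
+ "name": "demo",
+ })
+ if d.Get("name").(string) != "demo" {
+ t.Fatal("unexpected value for name")
+ }
+}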
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go
new file mode 100644
index 00000000..9286987d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go
@@ -0,0 +1,21 @@
+package schema
+
+//go:generate stringer -type=ValueType valuetype.go
+
+// ValueType is an enum of the type that can be represented by a schema.
+type ValueType int
+
+const (
+ TypeInvalid ValueType = iota
+ TypeBool
+ TypeInt
+ TypeFloat
+ TypeString
+ TypeList
+ TypeMap
+ TypeSet
+ typeObject
+)
+
+// NOTE: ValueType has more functions defined on it in schema.go. We can't
+// put them here because we reference other files.
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
new file mode 100644
index 00000000..1610cec2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=ValueType valuetype.go"; DO NOT EDIT.
+
+package schema
+
+import "fmt"
+
+const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject"
+
+var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77}
+
+func (i ValueType) String() string {
+ if i < 0 || i >= ValueType(len(_ValueType_index)-1) {
+ return fmt.Sprintf("ValueType(%d)", i)
+ }
+ return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]]
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go
new file mode 100644
index 00000000..7edd5e75
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go
@@ -0,0 +1,80 @@
+package shadow
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/mitchellh/reflectwalk"
+)
+
+// Close will close all shadow values within the given structure.
+//
+// This uses reflection to walk the structure, find all shadow elements,
+// and close them. Currently this will only find struct fields that are
+// shadow values, and not slice elements, etc.
+func Close(v interface{}) error {
+ // We require a pointer so we can address the internal fields
+ val := reflect.ValueOf(v)
+ if val.Kind() != reflect.Ptr {
+ return fmt.Errorf("value must be a pointer")
+ }
+
+ // Walk and close
+ var w closeWalker
+ if err := reflectwalk.Walk(v, &w); err != nil {
+ return err
+ }
+
+ return w.Err
+}
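+
+// Illustrative sketch (not part of the original source): Close walking a
+// hypothetical wrapper struct and closing the shadow value it holds.
+func exampleClose() error {
+ // The field is a struct value (not a pointer) so the walker can see
+ // its package path and close it in place via its address.
+ type shadowedState struct {
+ Compared ComparedValue
+ }
+
+ var s shadowedState
+ s.Compared.Func = func(k, v interface{}) bool { return k == v }
+
+ // Close locates the shadow value via reflection; any goroutines
+ // blocked in its Value method will receive ErrClosed.
+ return Close(&s)
+}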
+
+type closeWalker struct {
+ Err error
+}
+
+func (w *closeWalker) Struct(reflect.Value) error {
+ // Do nothing. We implement this for reflectwalk.StructWalker
+ return nil
+}
+
+func (w *closeWalker) StructField(f reflect.StructField, v reflect.Value) error {
+ // Not sure why this would happen, but let's avoid a panic
+ if !v.IsValid() {
+ return nil
+ }
+
+ // PkgPath is empty for exported fields, so skip unexported fields
+ if f.PkgPath != "" {
+ return nil
+ }
+
+ // Verify the io.Closer is in this package
+ typ := v.Type()
+ if typ.PkgPath() != "github.com/hashicorp/terraform/helper/shadow" {
+ return nil
+ }
+
+ // We're looking for an io.Closer
+ raw := v.Interface()
+ if raw == nil {
+ return nil
+ }
+
+ closer, ok := raw.(io.Closer)
+ if !ok && v.CanAddr() {
+ closer, ok = v.Addr().Interface().(io.Closer)
+ }
+ if !ok {
+ return reflectwalk.SkipEntry
+ }
+
+ // Close it
+ if err := closer.Close(); err != nil {
+ w.Err = multierror.Append(w.Err, err)
+ }
+
+ // Don't go into the struct field
+ return reflectwalk.SkipEntry
+}
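
For illustration, a minimal sketch of how Close might be used, with a hypothetical struct embedding shadow values (Value and KeyedValue expose Close on pointer receivers, which is why the walker also tries the addressable form):

// shadowedProvider is a hypothetical struct holding shadow values.
type shadowedProvider struct {
	Configured shadow.Value
	Results    shadow.KeyedValue
}

func closeAll(p *shadowedProvider) error {
	// Close requires a pointer so the struct fields are addressable.
	return shadow.Close(p)
}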
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go
new file mode 100644
index 00000000..4223e925
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go
@@ -0,0 +1,128 @@
+package shadow
+
+import (
+ "sync"
+)
+
+// ComparedValue is a struct that finds a value by comparing some key
+// to the list of stored values. This is useful when there is no easy
+// uniquely identifying key that works in a map (for that, use KeyedValue).
+//
+// ComparedValue is very expensive, relative to other Value types. Try to
+// limit the number of values stored in a ComparedValue by potentially
+// nesting it within a KeyedValue (a keyed value points to a compared value,
+// for example).
+type ComparedValue struct {
+ // Func is a function that is given the lookup key and a single
+ // stored value. If it matches, it returns true.
+ Func func(k, v interface{}) bool
+
+ lock sync.Mutex
+ once sync.Once
+ closed bool
+ values []interface{}
+ waiters map[interface{}]*Value
+}
+
+// Close closes the value. This can never fail. For a definition of
+// "close" see the ErrClosed docs.
+func (w *ComparedValue) Close() error {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ // Set closed to true always
+ w.closed = true
+
+ // For all waiters, complete with ErrClosed
+ for k, val := range w.waiters {
+ val.SetValue(ErrClosed)
+ delete(w.waiters, k)
+ }
+
+ return nil
+}
+
+// Value returns the value that was set for the given key, or blocks
+// until one is available.
+func (w *ComparedValue) Value(k interface{}) interface{} {
+ v, val := w.valueWaiter(k)
+ if val == nil {
+ return v
+ }
+
+ return val.Value()
+}
+
+// ValueOk gets the value for the given key, returning immediately if the
+// value doesn't exist. The second return argument is true if the value exists.
+func (w *ComparedValue) ValueOk(k interface{}) (interface{}, bool) {
+ v, val := w.valueWaiter(k)
+ return v, val == nil
+}
+
+func (w *ComparedValue) SetValue(v interface{}) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.once.Do(w.init)
+
+ // Check if we already have this exact value (by simply comparing
+ // with == directly). If we do, then we don't insert it again.
+ found := false
+ for _, v2 := range w.values {
+ if v == v2 {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ // Set the value, always
+ w.values = append(w.values, v)
+ }
+
+ // Go through the waiters
+ for k, val := range w.waiters {
+ if w.Func(k, v) {
+ val.SetValue(v)
+ delete(w.waiters, k)
+ }
+ }
+}
+
+func (w *ComparedValue) valueWaiter(k interface{}) (interface{}, *Value) {
+ w.lock.Lock()
+ w.once.Do(w.init)
+
+ // Look for a pre-existing value
+ for _, v := range w.values {
+ if w.Func(k, v) {
+ w.lock.Unlock()
+ return v, nil
+ }
+ }
+
+ // If we're closed, return that
+ if w.closed {
+ w.lock.Unlock()
+ return ErrClosed, nil
+ }
+
+ // Pre-existing value doesn't exist, create a waiter
+ val := w.waiters[k]
+ if val == nil {
+ val = new(Value)
+ w.waiters[k] = val
+ }
+ w.lock.Unlock()
+
+ // Return the waiter
+ return nil, val
+}
+
+// Must be called with w.lock held.
+func (w *ComparedValue) init() {
+ w.waiters = make(map[interface{}]*Value)
+ if w.Func == nil {
+ w.Func = func(k, v interface{}) bool { return k == v }
+ }
+}
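
A short usage sketch, assuming a hypothetical user type; Func compares the lookup key against each stored value:

type user struct{ Name string }

func exampleComparedValue() {
	cv := &shadow.ComparedValue{
		Func: func(k, v interface{}) bool {
			u, ok := v.(*user)
			return ok && u.Name == k.(string)
		},
	}

	// SetValue may happen on another goroutine; Value blocks until a
	// stored value matches the key via Func.
	go cv.SetValue(&user{Name: "alice"})
	u := cv.Value("alice").(*user)
	fmt.Println(u.Name) // alice
}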
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go
new file mode 100644
index 00000000..432b0366
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go
@@ -0,0 +1,151 @@
+package shadow
+
+import (
+ "sync"
+)
+
+// KeyedValue is a struct that coordinates a value by key. If a value is
+// not available for a given key, it'll block until it is available.
+type KeyedValue struct {
+ lock sync.Mutex
+ once sync.Once
+ values map[string]interface{}
+ waiters map[string]*Value
+ closed bool
+}
+
+// Close closes the value. This can never fail. For a definition of
+// "close" see the ErrClosed docs.
+func (w *KeyedValue) Close() error {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ // Set closed to true always
+ w.closed = true
+
+ // For all waiters, complete with ErrClosed
+ for k, val := range w.waiters {
+ val.SetValue(ErrClosed)
+ delete(w.waiters, k)
+ }
+
+ return nil
+}
+
+// Value returns the value that was set for the given key, or blocks
+// until one is available.
+func (w *KeyedValue) Value(k string) interface{} {
+ w.lock.Lock()
+ v, val := w.valueWaiter(k)
+ w.lock.Unlock()
+
+ // If we have no waiter, then return the value
+ if val == nil {
+ return v
+ }
+
+ // We have a waiter, so wait
+ return val.Value()
+}
+
+// WaitForChange waits for the value with the given key to be set again.
+// If the key isn't set, it'll wait for an initial value. Note that while
+// it is called "WaitForChange", the value isn't guaranteed to _change_;
+// this will return when a SetValue is called for the given k.
+func (w *KeyedValue) WaitForChange(k string) interface{} {
+ w.lock.Lock()
+ w.once.Do(w.init)
+
+ // If we're closed, we're closed
+ if w.closed {
+ w.lock.Unlock()
+ return ErrClosed
+ }
+
+ // Check for an active waiter. If there isn't one, make it
+ val := w.waiters[k]
+ if val == nil {
+ val = new(Value)
+ w.waiters[k] = val
+ }
+ w.lock.Unlock()
+
+ // And wait
+ return val.Value()
+}
+
+// ValueOk gets the value for the given key, returning immediately if the
+// value doesn't exist. The second return argument is true if the value exists.
+func (w *KeyedValue) ValueOk(k string) (interface{}, bool) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ v, val := w.valueWaiter(k)
+ return v, val == nil
+}
+
+func (w *KeyedValue) SetValue(k string, v interface{}) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.setValue(k, v)
+}
+
+// Init will initialize the key to a given value only if the key has
+// not been set before. This is safe to call multiple times and in parallel.
+func (w *KeyedValue) Init(k string, v interface{}) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+	// A waiter is returned only when the key isn't set yet; set it now.
+ _, val := w.valueWaiter(k)
+ if val != nil {
+ w.setValue(k, v)
+ }
+}
+
+// Must be called with w.lock held.
+func (w *KeyedValue) init() {
+ w.values = make(map[string]interface{})
+ w.waiters = make(map[string]*Value)
+}
+
+// setValue is like SetValue but assumes the lock is held.
+func (w *KeyedValue) setValue(k string, v interface{}) {
+ w.once.Do(w.init)
+
+ // Set the value, always
+ w.values[k] = v
+
+ // If we have a waiter, set it
+ if val, ok := w.waiters[k]; ok {
+ val.SetValue(v)
+ delete(w.waiters, k)
+ }
+}
+
+// valueWaiter gets the value or the Value waiter for a given key.
+//
+// This must be called with lock held.
+func (w *KeyedValue) valueWaiter(k string) (interface{}, *Value) {
+ w.once.Do(w.init)
+
+ // If we have this value already, return it
+ if v, ok := w.values[k]; ok {
+ return v, nil
+ }
+
+ // If we're closed, return that
+ if w.closed {
+ return ErrClosed, nil
+ }
+
+ // No pending value, check for a waiter
+ val := w.waiters[k]
+ if val == nil {
+ val = new(Value)
+ w.waiters[k] = val
+ }
+
+ // Return the waiter
+ return nil, val
+}
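
A minimal sketch of the blocking and non-blocking accessors:

func exampleKeyedValue() {
	var kv shadow.KeyedValue

	// A reader blocks until the key is set...
	done := make(chan interface{})
	go func() { done <- kv.Value("result") }()

	// ...and a writer releases it.
	kv.SetValue("result", 42)
	fmt.Println(<-done) // 42

	// ValueOk never blocks.
	_, ok := kv.ValueOk("missing")
	fmt.Println(ok) // false
}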
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go
new file mode 100644
index 00000000..0a43d4d4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go
@@ -0,0 +1,66 @@
+package shadow
+
+import (
+ "container/list"
+ "sync"
+)
+
+// OrderedValue is a struct that keeps track of values in the order
+// they are set. Each time Value is called, it returns the oldest
+// unread value and then discards it (first in, first out).
+//
+// This is unlike Value, which returns the same value once it is set.
+type OrderedValue struct {
+ lock sync.Mutex
+ values *list.List
+ waiters *list.List
+}
+
+// Value returns the next queued value, or blocks until one
+// is received.
+func (w *OrderedValue) Value() interface{} {
+ w.lock.Lock()
+
+ // If we have a pending value already, use it
+ if w.values != nil && w.values.Len() > 0 {
+ front := w.values.Front()
+ w.values.Remove(front)
+ w.lock.Unlock()
+ return front.Value
+ }
+
+ // No pending value, create a waiter
+ if w.waiters == nil {
+ w.waiters = list.New()
+ }
+
+ var val Value
+ w.waiters.PushBack(&val)
+ w.lock.Unlock()
+
+ // Return the value once we have it
+ return val.Value()
+}
+
+// SetValue sets the latest value.
+func (w *OrderedValue) SetValue(v interface{}) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ // If we have a waiter, notify it
+ if w.waiters != nil && w.waiters.Len() > 0 {
+ front := w.waiters.Front()
+ w.waiters.Remove(front)
+
+ val := front.Value.(*Value)
+ val.SetValue(v)
+ return
+ }
+
+ // Add it to the list of values
+ if w.values == nil {
+ w.values = list.New()
+ }
+
+ w.values.PushBack(v)
+}
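
A small sketch of the queue behavior; note the first-in, first-out ordering:

func exampleOrderedValue() {
	var ov shadow.OrderedValue
	ov.SetValue("first")
	ov.SetValue("second")

	// Values come back in the order they were set and are discarded
	// once read. A third call here would block until another SetValue.
	fmt.Println(ov.Value()) // first
	fmt.Println(ov.Value()) // second
}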
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go
new file mode 100644
index 00000000..2413335b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go
@@ -0,0 +1,79 @@
+package shadow
+
+import (
+ "errors"
+ "sync"
+)
+
+// ErrClosed is returned by any closed values.
+//
+// A "closed value" is when the shadow has been notified that the real
+// side is complete and any blocking values will _never_ be satisfied
+// in the future. In this case, this error is returned. If a value is already
+// available, that is still returned.
+var ErrClosed = errors.New("shadow closed")
+
+// Value is a struct that coordinates a value between two
+// parallel routines. It is similar to atomic.Value except that a call
+// to Value blocks until a value has been set.
+//
+// The Value can be closed with Close, which will cause any future
+// blocking operations to return immediately with ErrClosed.
+type Value struct {
+ lock sync.Mutex
+ cond *sync.Cond
+ value interface{}
+ valueSet bool
+}
+
+// Close closes the value. This can never fail. For a definition of
+// "close" see the struct docs.
+func (w *Value) Close() error {
+ w.lock.Lock()
+ set := w.valueSet
+ w.lock.Unlock()
+
+ // If we haven't set the value, set it
+ if !set {
+ w.SetValue(ErrClosed)
+ }
+
+ // Done
+ return nil
+}
+
+// Value returns the value that was set.
+func (w *Value) Value() interface{} {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+	// Wait until a value has been set
+ for !w.valueSet {
+		// No value yet; set up the condition variable if needed
+ if w.cond == nil {
+ w.cond = sync.NewCond(&w.lock)
+ }
+
+ // Wait on it
+ w.cond.Wait()
+ }
+
+ // Return the value
+ return w.value
+}
+
+// SetValue sets the value.
+func (w *Value) SetValue(v interface{}) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ // Set the value
+ w.valueSet = true
+ w.value = v
+
+ // If we have a condition, clear it
+ if w.cond != nil {
+ w.cond.Broadcast()
+ w.cond = nil
+ }
+}
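
A sketch of the close-before-set case described in the ErrClosed docs:

func exampleValue() {
	var v shadow.Value

	// The real side finished without ever producing a value.
	go v.Close()

	// Value blocks until SetValue or Close; a closed, unset value
	// yields ErrClosed.
	if err, ok := v.Value().(error); ok && err == shadow.ErrClosed {
		fmt.Println("closed before a value arrived")
	}
}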
diff --git a/vendor/github.com/hashicorp/terraform/plugin/plugin.go b/vendor/github.com/hashicorp/terraform/plugin/plugin.go
new file mode 100644
index 00000000..00fa7b29
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/plugin.go
@@ -0,0 +1,13 @@
+package plugin
+
+import (
+ "github.com/hashicorp/go-plugin"
+)
+
+// See serve.go for serving plugins
+
+// PluginMap should be used by clients for the map of plugins.
+var PluginMap = map[string]plugin.Plugin{
+ "provider": &ResourceProviderPlugin{},
+ "provisioner": &ResourceProvisionerPlugin{},
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
new file mode 100644
index 00000000..473f7860
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
@@ -0,0 +1,578 @@
+package plugin
+
+import (
+ "net/rpc"
+
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// ResourceProviderPlugin is the plugin.Plugin implementation.
+type ResourceProviderPlugin struct {
+ F func() terraform.ResourceProvider
+}
+
+func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) {
+ return &ResourceProviderServer{Broker: b, Provider: p.F()}, nil
+}
+
+func (p *ResourceProviderPlugin) Client(
+ b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
+ return &ResourceProvider{Broker: b, Client: c}, nil
+}
+
+// ResourceProvider is an implementation of terraform.ResourceProvider
+// that communicates over RPC.
+type ResourceProvider struct {
+ Broker *plugin.MuxBroker
+ Client *rpc.Client
+}
+
+func (p *ResourceProvider) Stop() error {
+ var resp ResourceProviderStopResponse
+ err := p.Client.Call("Plugin.Stop", new(interface{}), &resp)
+ if err != nil {
+ return err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return err
+}
+
+func (p *ResourceProvider) Input(
+ input terraform.UIInput,
+ c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+ id := p.Broker.NextId()
+ go p.Broker.AcceptAndServe(id, &UIInputServer{
+ UIInput: input,
+ })
+
+ var resp ResourceProviderInputResponse
+ args := ResourceProviderInputArgs{
+ InputId: id,
+ Config: c,
+ }
+
+ err := p.Client.Call("Plugin.Input", &args, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ return nil, err
+ }
+
+ return resp.Config, nil
+}
+
+func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ var resp ResourceProviderValidateResponse
+ args := ResourceProviderValidateArgs{
+ Config: c,
+ }
+
+ err := p.Client.Call("Plugin.Validate", &args, &resp)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ var errs []error
+ if len(resp.Errors) > 0 {
+ errs = make([]error, len(resp.Errors))
+ for i, err := range resp.Errors {
+ errs[i] = err
+ }
+ }
+
+ return resp.Warnings, errs
+}
+
+func (p *ResourceProvider) ValidateResource(
+ t string, c *terraform.ResourceConfig) ([]string, []error) {
+ var resp ResourceProviderValidateResourceResponse
+ args := ResourceProviderValidateResourceArgs{
+ Config: c,
+ Type: t,
+ }
+
+ err := p.Client.Call("Plugin.ValidateResource", &args, &resp)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ var errs []error
+ if len(resp.Errors) > 0 {
+ errs = make([]error, len(resp.Errors))
+ for i, err := range resp.Errors {
+ errs[i] = err
+ }
+ }
+
+ return resp.Warnings, errs
+}
+
+func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error {
+ var resp ResourceProviderConfigureResponse
+ err := p.Client.Call("Plugin.Configure", c, &resp)
+ if err != nil {
+ return err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return err
+}
+
+func (p *ResourceProvider) Apply(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
+ var resp ResourceProviderApplyResponse
+ args := &ResourceProviderApplyArgs{
+ Info: info,
+ State: s,
+ Diff: d,
+ }
+
+ err := p.Client.Call("Plugin.Apply", args, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return resp.State, err
+}
+
+func (p *ResourceProvider) Diff(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+ var resp ResourceProviderDiffResponse
+ args := &ResourceProviderDiffArgs{
+ Info: info,
+ State: s,
+ Config: c,
+ }
+ err := p.Client.Call("Plugin.Diff", args, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return resp.Diff, err
+}
+
+func (p *ResourceProvider) ValidateDataSource(
+ t string, c *terraform.ResourceConfig) ([]string, []error) {
+ var resp ResourceProviderValidateResourceResponse
+ args := ResourceProviderValidateResourceArgs{
+ Config: c,
+ Type: t,
+ }
+
+ err := p.Client.Call("Plugin.ValidateDataSource", &args, &resp)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ var errs []error
+ if len(resp.Errors) > 0 {
+ errs = make([]error, len(resp.Errors))
+ for i, err := range resp.Errors {
+ errs[i] = err
+ }
+ }
+
+ return resp.Warnings, errs
+}
+
+func (p *ResourceProvider) Refresh(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState) (*terraform.InstanceState, error) {
+ var resp ResourceProviderRefreshResponse
+ args := &ResourceProviderRefreshArgs{
+ Info: info,
+ State: s,
+ }
+
+ err := p.Client.Call("Plugin.Refresh", args, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return resp.State, err
+}
+
+func (p *ResourceProvider) ImportState(
+ info *terraform.InstanceInfo,
+ id string) ([]*terraform.InstanceState, error) {
+ var resp ResourceProviderImportStateResponse
+ args := &ResourceProviderImportStateArgs{
+ Info: info,
+ Id: id,
+ }
+
+ err := p.Client.Call("Plugin.ImportState", args, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return resp.State, err
+}
+
+func (p *ResourceProvider) Resources() []terraform.ResourceType {
+ var result []terraform.ResourceType
+
+ err := p.Client.Call("Plugin.Resources", new(interface{}), &result)
+ if err != nil {
+ // TODO: panic, log, what?
+ return nil
+ }
+
+ return result
+}
+
+func (p *ResourceProvider) ReadDataDiff(
+ info *terraform.InstanceInfo,
+ c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+ var resp ResourceProviderReadDataDiffResponse
+ args := &ResourceProviderReadDataDiffArgs{
+ Info: info,
+ Config: c,
+ }
+
+ err := p.Client.Call("Plugin.ReadDataDiff", args, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return resp.Diff, err
+}
+
+func (p *ResourceProvider) ReadDataApply(
+ info *terraform.InstanceInfo,
+ d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
+ var resp ResourceProviderReadDataApplyResponse
+ args := &ResourceProviderReadDataApplyArgs{
+ Info: info,
+ Diff: d,
+ }
+
+ err := p.Client.Call("Plugin.ReadDataApply", args, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return resp.State, err
+}
+
+func (p *ResourceProvider) DataSources() []terraform.DataSource {
+ var result []terraform.DataSource
+
+ err := p.Client.Call("Plugin.DataSources", new(interface{}), &result)
+ if err != nil {
+ // TODO: panic, log, what?
+ return nil
+ }
+
+ return result
+}
+
+func (p *ResourceProvider) Close() error {
+ return p.Client.Close()
+}
+
+// ResourceProviderServer is a net/rpc compatible structure for serving
+// a ResourceProvider. This should not be used directly.
+type ResourceProviderServer struct {
+ Broker *plugin.MuxBroker
+ Provider terraform.ResourceProvider
+}
+
+type ResourceProviderStopResponse struct {
+ Error *plugin.BasicError
+}
+
+type ResourceProviderConfigureResponse struct {
+ Error *plugin.BasicError
+}
+
+type ResourceProviderInputArgs struct {
+ InputId uint32
+ Config *terraform.ResourceConfig
+}
+
+type ResourceProviderInputResponse struct {
+ Config *terraform.ResourceConfig
+ Error *plugin.BasicError
+}
+
+type ResourceProviderApplyArgs struct {
+ Info *terraform.InstanceInfo
+ State *terraform.InstanceState
+ Diff *terraform.InstanceDiff
+}
+
+type ResourceProviderApplyResponse struct {
+ State *terraform.InstanceState
+ Error *plugin.BasicError
+}
+
+type ResourceProviderDiffArgs struct {
+ Info *terraform.InstanceInfo
+ State *terraform.InstanceState
+ Config *terraform.ResourceConfig
+}
+
+type ResourceProviderDiffResponse struct {
+ Diff *terraform.InstanceDiff
+ Error *plugin.BasicError
+}
+
+type ResourceProviderRefreshArgs struct {
+ Info *terraform.InstanceInfo
+ State *terraform.InstanceState
+}
+
+type ResourceProviderRefreshResponse struct {
+ State *terraform.InstanceState
+ Error *plugin.BasicError
+}
+
+type ResourceProviderImportStateArgs struct {
+ Info *terraform.InstanceInfo
+ Id string
+}
+
+type ResourceProviderImportStateResponse struct {
+ State []*terraform.InstanceState
+ Error *plugin.BasicError
+}
+
+type ResourceProviderReadDataApplyArgs struct {
+ Info *terraform.InstanceInfo
+ Diff *terraform.InstanceDiff
+}
+
+type ResourceProviderReadDataApplyResponse struct {
+ State *terraform.InstanceState
+ Error *plugin.BasicError
+}
+
+type ResourceProviderReadDataDiffArgs struct {
+ Info *terraform.InstanceInfo
+ Config *terraform.ResourceConfig
+}
+
+type ResourceProviderReadDataDiffResponse struct {
+ Diff *terraform.InstanceDiff
+ Error *plugin.BasicError
+}
+
+type ResourceProviderValidateArgs struct {
+ Config *terraform.ResourceConfig
+}
+
+type ResourceProviderValidateResponse struct {
+ Warnings []string
+ Errors []*plugin.BasicError
+}
+
+type ResourceProviderValidateResourceArgs struct {
+ Config *terraform.ResourceConfig
+ Type string
+}
+
+type ResourceProviderValidateResourceResponse struct {
+ Warnings []string
+ Errors []*plugin.BasicError
+}
+
+func (s *ResourceProviderServer) Stop(
+ _ interface{},
+ reply *ResourceProviderStopResponse) error {
+ err := s.Provider.Stop()
+ *reply = ResourceProviderStopResponse{
+ Error: plugin.NewBasicError(err),
+ }
+
+ return nil
+}
+
+func (s *ResourceProviderServer) Input(
+ args *ResourceProviderInputArgs,
+ reply *ResourceProviderInputResponse) error {
+ conn, err := s.Broker.Dial(args.InputId)
+ if err != nil {
+ *reply = ResourceProviderInputResponse{
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+ }
+ client := rpc.NewClient(conn)
+ defer client.Close()
+
+ input := &UIInput{Client: client}
+
+ config, err := s.Provider.Input(input, args.Config)
+ *reply = ResourceProviderInputResponse{
+ Config: config,
+ Error: plugin.NewBasicError(err),
+ }
+
+ return nil
+}
+
+func (s *ResourceProviderServer) Validate(
+ args *ResourceProviderValidateArgs,
+ reply *ResourceProviderValidateResponse) error {
+ warns, errs := s.Provider.Validate(args.Config)
+ berrs := make([]*plugin.BasicError, len(errs))
+ for i, err := range errs {
+ berrs[i] = plugin.NewBasicError(err)
+ }
+ *reply = ResourceProviderValidateResponse{
+ Warnings: warns,
+ Errors: berrs,
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) ValidateResource(
+ args *ResourceProviderValidateResourceArgs,
+ reply *ResourceProviderValidateResourceResponse) error {
+ warns, errs := s.Provider.ValidateResource(args.Type, args.Config)
+ berrs := make([]*plugin.BasicError, len(errs))
+ for i, err := range errs {
+ berrs[i] = plugin.NewBasicError(err)
+ }
+ *reply = ResourceProviderValidateResourceResponse{
+ Warnings: warns,
+ Errors: berrs,
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) Configure(
+ config *terraform.ResourceConfig,
+ reply *ResourceProviderConfigureResponse) error {
+ err := s.Provider.Configure(config)
+ *reply = ResourceProviderConfigureResponse{
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) Apply(
+ args *ResourceProviderApplyArgs,
+ result *ResourceProviderApplyResponse) error {
+ state, err := s.Provider.Apply(args.Info, args.State, args.Diff)
+ *result = ResourceProviderApplyResponse{
+ State: state,
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) Diff(
+ args *ResourceProviderDiffArgs,
+ result *ResourceProviderDiffResponse) error {
+ diff, err := s.Provider.Diff(args.Info, args.State, args.Config)
+ *result = ResourceProviderDiffResponse{
+ Diff: diff,
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) Refresh(
+ args *ResourceProviderRefreshArgs,
+ result *ResourceProviderRefreshResponse) error {
+ newState, err := s.Provider.Refresh(args.Info, args.State)
+ *result = ResourceProviderRefreshResponse{
+ State: newState,
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) ImportState(
+ args *ResourceProviderImportStateArgs,
+ result *ResourceProviderImportStateResponse) error {
+ states, err := s.Provider.ImportState(args.Info, args.Id)
+ *result = ResourceProviderImportStateResponse{
+ State: states,
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) Resources(
+ nothing interface{},
+ result *[]terraform.ResourceType) error {
+ *result = s.Provider.Resources()
+ return nil
+}
+
+func (s *ResourceProviderServer) ValidateDataSource(
+ args *ResourceProviderValidateResourceArgs,
+ reply *ResourceProviderValidateResourceResponse) error {
+ warns, errs := s.Provider.ValidateDataSource(args.Type, args.Config)
+ berrs := make([]*plugin.BasicError, len(errs))
+ for i, err := range errs {
+ berrs[i] = plugin.NewBasicError(err)
+ }
+ *reply = ResourceProviderValidateResourceResponse{
+ Warnings: warns,
+ Errors: berrs,
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) ReadDataDiff(
+ args *ResourceProviderReadDataDiffArgs,
+ result *ResourceProviderReadDataDiffResponse) error {
+ diff, err := s.Provider.ReadDataDiff(args.Info, args.Config)
+ *result = ResourceProviderReadDataDiffResponse{
+ Diff: diff,
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) ReadDataApply(
+ args *ResourceProviderReadDataApplyArgs,
+ result *ResourceProviderReadDataApplyResponse) error {
+ newState, err := s.Provider.ReadDataApply(args.Info, args.Diff)
+ *result = ResourceProviderReadDataApplyResponse{
+ State: newState,
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) DataSources(
+ nothing interface{},
+ result *[]terraform.DataSource) error {
+ *result = s.Provider.DataSources()
+ return nil
+}
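
Every method above follows one convention: a typed args struct, a response struct whose Error field is a *plugin.BasicError, and a synchronous net/rpc call. A hypothetical Ping method, written as if inside this package purely to make the pattern explicit (it is not part of the real interface):

// ResourceProviderPingResponse follows the file's convention: remote
// errors travel as *plugin.BasicError, never as raw Go errors.
type ResourceProviderPingResponse struct {
	Error *plugin.BasicError
}

func (p *ResourceProvider) Ping() error {
	var resp ResourceProviderPingResponse
	if err := p.Client.Call("Plugin.Ping", new(interface{}), &resp); err != nil {
		return err // transport-level failure
	}
	if resp.Error != nil {
		return resp.Error // failure reported by the remote side
	}
	return nil
}

func (s *ResourceProviderServer) Ping(
	_ interface{},
	reply *ResourceProviderPingResponse) error {
	// A real method would call into s.Provider here.
	*reply = ResourceProviderPingResponse{Error: plugin.NewBasicError(nil)}
	return nil
}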
diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go
new file mode 100644
index 00000000..8fce9d8a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go
@@ -0,0 +1,173 @@
+package plugin
+
+import (
+ "net/rpc"
+
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// ResourceProvisionerPlugin is the plugin.Plugin implementation.
+type ResourceProvisionerPlugin struct {
+ F func() terraform.ResourceProvisioner
+}
+
+func (p *ResourceProvisionerPlugin) Server(b *plugin.MuxBroker) (interface{}, error) {
+ return &ResourceProvisionerServer{Broker: b, Provisioner: p.F()}, nil
+}
+
+func (p *ResourceProvisionerPlugin) Client(
+ b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
+ return &ResourceProvisioner{Broker: b, Client: c}, nil
+}
+
+// ResourceProvisioner is an implementation of terraform.ResourceProvisioner
+// that communicates over RPC.
+type ResourceProvisioner struct {
+ Broker *plugin.MuxBroker
+ Client *rpc.Client
+}
+
+func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ var resp ResourceProvisionerValidateResponse
+ args := ResourceProvisionerValidateArgs{
+ Config: c,
+ }
+
+ err := p.Client.Call("Plugin.Validate", &args, &resp)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ var errs []error
+ if len(resp.Errors) > 0 {
+ errs = make([]error, len(resp.Errors))
+ for i, err := range resp.Errors {
+ errs[i] = err
+ }
+ }
+
+ return resp.Warnings, errs
+}
+
+func (p *ResourceProvisioner) Apply(
+ output terraform.UIOutput,
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig) error {
+ id := p.Broker.NextId()
+ go p.Broker.AcceptAndServe(id, &UIOutputServer{
+ UIOutput: output,
+ })
+
+ var resp ResourceProvisionerApplyResponse
+ args := &ResourceProvisionerApplyArgs{
+ OutputId: id,
+ State: s,
+ Config: c,
+ }
+
+ err := p.Client.Call("Plugin.Apply", args, &resp)
+ if err != nil {
+ return err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return err
+}
+
+func (p *ResourceProvisioner) Stop() error {
+ var resp ResourceProvisionerStopResponse
+ err := p.Client.Call("Plugin.Stop", new(interface{}), &resp)
+ if err != nil {
+ return err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return err
+}
+
+func (p *ResourceProvisioner) Close() error {
+ return p.Client.Close()
+}
+
+type ResourceProvisionerValidateArgs struct {
+ Config *terraform.ResourceConfig
+}
+
+type ResourceProvisionerValidateResponse struct {
+ Warnings []string
+ Errors []*plugin.BasicError
+}
+
+type ResourceProvisionerApplyArgs struct {
+ OutputId uint32
+ State *terraform.InstanceState
+ Config *terraform.ResourceConfig
+}
+
+type ResourceProvisionerApplyResponse struct {
+ Error *plugin.BasicError
+}
+
+type ResourceProvisionerStopResponse struct {
+ Error *plugin.BasicError
+}
+
+// ResourceProvisionerServer is a net/rpc compatible structure for serving
+// a ResourceProvisioner. This should not be used directly.
+type ResourceProvisionerServer struct {
+ Broker *plugin.MuxBroker
+ Provisioner terraform.ResourceProvisioner
+}
+
+func (s *ResourceProvisionerServer) Apply(
+ args *ResourceProvisionerApplyArgs,
+ result *ResourceProvisionerApplyResponse) error {
+ conn, err := s.Broker.Dial(args.OutputId)
+ if err != nil {
+ *result = ResourceProvisionerApplyResponse{
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+ }
+ client := rpc.NewClient(conn)
+ defer client.Close()
+
+ output := &UIOutput{Client: client}
+
+ err = s.Provisioner.Apply(output, args.State, args.Config)
+ *result = ResourceProvisionerApplyResponse{
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *ResourceProvisionerServer) Validate(
+ args *ResourceProvisionerValidateArgs,
+ reply *ResourceProvisionerValidateResponse) error {
+ warns, errs := s.Provisioner.Validate(args.Config)
+ berrs := make([]*plugin.BasicError, len(errs))
+ for i, err := range errs {
+ berrs[i] = plugin.NewBasicError(err)
+ }
+ *reply = ResourceProvisionerValidateResponse{
+ Warnings: warns,
+ Errors: berrs,
+ }
+ return nil
+}
+
+func (s *ResourceProvisionerServer) Stop(
+ _ interface{},
+ reply *ResourceProvisionerStopResponse) error {
+ err := s.Provisioner.Stop()
+ *reply = ResourceProvisionerStopResponse{
+ Error: plugin.NewBasicError(err),
+ }
+
+ return nil
+}
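
Apply above uses go-plugin's MuxBroker so the remote side can call back into the client: the caller reserves a stream ID, serves the callback on it, and ships the ID inside the args struct. A condensed sketch, as if inside this package, with hypothetical helper names:

// registerCallback reserves a stream ID and serves the callback there;
// the returned ID is sent to the remote side in the args struct.
func registerCallback(b *plugin.MuxBroker, output terraform.UIOutput) uint32 {
	id := b.NextId()
	go b.AcceptAndServe(id, &UIOutputServer{UIOutput: output})
	return id
}

// dialCallback is what the remote side does with the ID: dial it back
// over the same session and talk to the callback via a regular client.
func dialCallback(b *plugin.MuxBroker, id uint32) error {
	conn, err := b.Dial(id)
	if err != nil {
		return err
	}
	client := rpc.NewClient(conn)
	defer client.Close()

	(&UIOutput{Client: client}).Output("hello from the plugin")
	return nil
}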
diff --git a/vendor/github.com/hashicorp/terraform/plugin/serve.go b/vendor/github.com/hashicorp/terraform/plugin/serve.go
new file mode 100644
index 00000000..2028a613
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/serve.go
@@ -0,0 +1,54 @@
+package plugin
+
+import (
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// The constants below are the names of the plugins that can be dispensed
+// from the plugin server.
+const (
+ ProviderPluginName = "provider"
+ ProvisionerPluginName = "provisioner"
+)
+
+// Handshake is the HandshakeConfig used to configure clients and servers.
+var Handshake = plugin.HandshakeConfig{
+ // The ProtocolVersion is the version that must match between TF core
+	// and TF plugins. It should be bumped whenever a change in one or
+	// the other breaks their ability to communicate safely. This could
+	// be adding a new interface value, or a change in how helper/schema
+	// computes diffs, etc.
+ ProtocolVersion: 4,
+
+ // The magic cookie values should NEVER be changed.
+ MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE",
+ MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2",
+}
+
+type ProviderFunc func() terraform.ResourceProvider
+type ProvisionerFunc func() terraform.ResourceProvisioner
+
+// ServeOpts are the configurations to serve a plugin.
+type ServeOpts struct {
+ ProviderFunc ProviderFunc
+ ProvisionerFunc ProvisionerFunc
+}
+
+// Serve serves a plugin. This function never returns and should be the final
+// function called in the main function of the plugin.
+func Serve(opts *ServeOpts) {
+ plugin.Serve(&plugin.ServeConfig{
+ HandshakeConfig: Handshake,
+ Plugins: pluginMap(opts),
+ })
+}
+
+// pluginMap returns the map[string]plugin.Plugin to use for configuring a plugin
+// server or client.
+func pluginMap(opts *ServeOpts) map[string]plugin.Plugin {
+ return map[string]plugin.Plugin{
+ "provider": &ResourceProviderPlugin{F: opts.ProviderFunc},
+ "provisioner": &ResourceProvisionerPlugin{F: opts.ProvisionerFunc},
+ }
+}
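
A provider binary would typically call Serve as its last act. A minimal sketch of such a main package, with a hypothetical Provider constructor standing in for the real one:

package main

import (
	"github.com/hashicorp/terraform/plugin"
	"github.com/hashicorp/terraform/terraform"
)

// Provider is a hypothetical constructor for the provider being served.
func Provider() terraform.ResourceProvider { return nil }

func main() {
	// Serve blocks for the life of the plugin process.
	plugin.Serve(&plugin.ServeOpts{
		ProviderFunc: Provider,
	})
}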
diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
new file mode 100644
index 00000000..493efc0a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
@@ -0,0 +1,51 @@
+package plugin
+
+import (
+ "net/rpc"
+
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// UIInput is an implementation of terraform.UIInput that communicates
+// over RPC.
+type UIInput struct {
+ Client *rpc.Client
+}
+
+func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) {
+ var resp UIInputInputResponse
+ err := i.Client.Call("Plugin.Input", opts, &resp)
+ if err != nil {
+ return "", err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ return "", err
+ }
+
+ return resp.Value, nil
+}
+
+type UIInputInputResponse struct {
+ Value string
+ Error *plugin.BasicError
+}
+
+// UIInputServer is a net/rpc compatible structure for serving
+// a UIInputServer. This should not be used directly.
+type UIInputServer struct {
+ UIInput terraform.UIInput
+}
+
+func (s *UIInputServer) Input(
+ opts *terraform.InputOpts,
+ reply *UIInputInputResponse) error {
+ value, err := s.UIInput.Input(opts)
+ *reply = UIInputInputResponse{
+ Value: value,
+ Error: plugin.NewBasicError(err),
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_output.go b/vendor/github.com/hashicorp/terraform/plugin/ui_output.go
new file mode 100644
index 00000000..c222b00c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/ui_output.go
@@ -0,0 +1,29 @@
+package plugin
+
+import (
+ "net/rpc"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// UIOutput is an implementation of terraform.UIOutput that communicates
+// over RPC.
+type UIOutput struct {
+ Client *rpc.Client
+}
+
+func (o *UIOutput) Output(v string) {
+ o.Client.Call("Plugin.Output", v, new(interface{}))
+}
+
+// UIOutputServer is the RPC server for serving UIOutput.
+type UIOutputServer struct {
+ UIOutput terraform.UIOutput
+}
+
+func (s *UIOutputServer) Output(
+ v string,
+ reply *interface{}) error {
+ s.UIOutput.Output(v)
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go
new file mode 100644
index 00000000..306128ed
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context.go
@@ -0,0 +1,1022 @@
+package terraform
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/hcl"
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/helper/experiment"
+)
+
+// InputMode defines what sort of input will be asked for when Input
+// is called on Context.
+type InputMode byte
+
+const (
+ // InputModeVar asks for all variables
+ InputModeVar InputMode = 1 << iota
+
+ // InputModeVarUnset asks for variables which are not set yet.
+ // InputModeVar must be set for this to have an effect.
+ InputModeVarUnset
+
+ // InputModeProvider asks for provider variables
+ InputModeProvider
+
+ // InputModeStd is the standard operating mode and asks for both variables
+ // and providers.
+ InputModeStd = InputModeVar | InputModeProvider
+)
+
+var (
+ // contextFailOnShadowError will cause Context operations to return
+ // errors when shadow operations fail. This is only used for testing.
+ contextFailOnShadowError = false
+
+ // contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every
+ // Plan operation, effectively testing the Diff DeepCopy whenever
+ // a Plan occurs. This is enabled for tests.
+ contextTestDeepCopyOnPlan = false
+)
+
+// ContextOpts are the user-configurable options to create a context with
+// NewContext.
+type ContextOpts struct {
+ Meta *ContextMeta
+ Destroy bool
+ Diff *Diff
+ Hooks []Hook
+ Module *module.Tree
+ Parallelism int
+ State *State
+ StateFutureAllowed bool
+ Providers map[string]ResourceProviderFactory
+ Provisioners map[string]ResourceProvisionerFactory
+ Shadow bool
+ Targets []string
+ Variables map[string]interface{}
+
+ UIInput UIInput
+}
+
+// ContextMeta is metadata about the running context. This is information
+// that this package or structure cannot determine on its own but exposes
+// into Terraform in various ways. This must be provided by the Context
+// initializer.
+type ContextMeta struct {
+ Env string // Env is the state environment
+}
+
+// Context represents all the context that Terraform needs in order to
+// perform operations on infrastructure. This structure is built using
+// NewContext. See the documentation for that.
+//
+// Extra functions on Context can be found in context_*.go files.
+type Context struct {
+ // Maintainer note: Anytime this struct is changed, please verify
+ // that newShadowContext still does the right thing. Tests should
+ // fail regardless but putting this note here as well.
+
+ components contextComponentFactory
+ destroy bool
+ diff *Diff
+ diffLock sync.RWMutex
+ hooks []Hook
+ meta *ContextMeta
+ module *module.Tree
+ sh *stopHook
+ shadow bool
+ state *State
+ stateLock sync.RWMutex
+ targets []string
+ uiInput UIInput
+ variables map[string]interface{}
+
+ l sync.Mutex // Lock acquired during any task
+ parallelSem Semaphore
+ providerInputConfig map[string]map[string]interface{}
+ runLock sync.Mutex
+ runCond *sync.Cond
+ runContext context.Context
+ runContextCancel context.CancelFunc
+ shadowErr error
+}
+
+// NewContext creates a new Context structure.
+//
+// Once a Context is created, the pointer values within ContextOpts
+// should not be mutated in any way, since the pointers are copied, not
+// the values themselves.
+func NewContext(opts *ContextOpts) (*Context, error) {
+ // Validate the version requirement if it is given
+ if opts.Module != nil {
+ if err := checkRequiredVersion(opts.Module); err != nil {
+ return nil, err
+ }
+ }
+
+ // Copy all the hooks and add our stop hook. We don't append directly
+// to opts.Hooks so that we don't modify the caller's slice in place.
+ sh := new(stopHook)
+ hooks := make([]Hook, len(opts.Hooks)+1)
+ copy(hooks, opts.Hooks)
+ hooks[len(opts.Hooks)] = sh
+
+ state := opts.State
+ if state == nil {
+ state = new(State)
+ state.init()
+ }
+
+ // If our state is from the future, then error. Callers can avoid
+ // this error by explicitly setting `StateFutureAllowed`.
+ if !opts.StateFutureAllowed && state.FromFutureTerraform() {
+ return nil, fmt.Errorf(
+ "Terraform doesn't allow running any operations against a state\n"+
+ "that was written by a future Terraform version. The state is\n"+
+ "reporting it is written by Terraform '%s'.\n\n"+
+ "Please run at least that version of Terraform to continue.",
+ state.TFVersion)
+ }
+
+ // Explicitly reset our state version to our current version so that
+ // any operations we do will write out that our latest version
+ // has run.
+ state.TFVersion = Version
+
+	// Determine parallelism, defaulting to 10. We limit this both to
+	// reduce CPU pressure and to provide an extra guard against rate
+	// throttling from providers.
+ par := opts.Parallelism
+ if par == 0 {
+ par = 10
+ }
+
+ // Set up the variables in the following sequence:
+ // 0 - Take default values from the configuration
+ // 1 - Take values from TF_VAR_x environment variables
+ // 2 - Take values specified in -var flags, overriding values
+ // set by environment variables if necessary. This includes
+ // values taken from -var-file in addition.
+ variables := make(map[string]interface{})
+
+ if opts.Module != nil {
+ var err error
+ variables, err = Variables(opts.Module, opts.Variables)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ diff := opts.Diff
+ if diff == nil {
+ diff = &Diff{}
+ }
+
+ return &Context{
+ components: &basicComponentFactory{
+ providers: opts.Providers,
+ provisioners: opts.Provisioners,
+ },
+ destroy: opts.Destroy,
+ diff: diff,
+ hooks: hooks,
+ meta: opts.Meta,
+ module: opts.Module,
+ shadow: opts.Shadow,
+ state: state,
+ targets: opts.Targets,
+ uiInput: opts.UIInput,
+ variables: variables,
+
+ parallelSem: NewSemaphore(par),
+ providerInputConfig: make(map[string]map[string]interface{}),
+ sh: sh,
+ }, nil
+}
+
+type ContextGraphOpts struct {
+ // If true, validates the graph structure (checks for cycles).
+ Validate bool
+
+ // Legacy graphs only: won't prune the graph
+ Verbose bool
+}
+
+// Graph returns the graph used for the given operation type.
+//
+// The most extensive or complex graph type is GraphTypePlan.
+func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
+ if opts == nil {
+ opts = &ContextGraphOpts{Validate: true}
+ }
+
+ log.Printf("[INFO] terraform: building graph: %s", typ)
+ switch typ {
+ case GraphTypeApply:
+ return (&ApplyGraphBuilder{
+ Module: c.module,
+ Diff: c.diff,
+ State: c.state,
+ Providers: c.components.ResourceProviders(),
+ Provisioners: c.components.ResourceProvisioners(),
+ Targets: c.targets,
+ Destroy: c.destroy,
+ Validate: opts.Validate,
+ }).Build(RootModulePath)
+
+ case GraphTypeInput:
+ // The input graph is just a slightly modified plan graph
+ fallthrough
+ case GraphTypeValidate:
+ // The validate graph is just a slightly modified plan graph
+ fallthrough
+ case GraphTypePlan:
+ // Create the plan graph builder
+ p := &PlanGraphBuilder{
+ Module: c.module,
+ State: c.state,
+ Providers: c.components.ResourceProviders(),
+ Targets: c.targets,
+ Validate: opts.Validate,
+ }
+
+		// Handle the graph types that currently share the plan graph
+ var b GraphBuilder = p
+ switch typ {
+ case GraphTypeInput:
+ b = InputGraphBuilder(p)
+ case GraphTypeValidate:
+ // We need to set the provisioners so those can be validated
+ p.Provisioners = c.components.ResourceProvisioners()
+
+ b = ValidateGraphBuilder(p)
+ }
+
+ return b.Build(RootModulePath)
+
+ case GraphTypePlanDestroy:
+ return (&DestroyPlanGraphBuilder{
+ Module: c.module,
+ State: c.state,
+ Targets: c.targets,
+ Validate: opts.Validate,
+ }).Build(RootModulePath)
+
+ case GraphTypeRefresh:
+ return (&RefreshGraphBuilder{
+ Module: c.module,
+ State: c.state,
+ Providers: c.components.ResourceProviders(),
+ Targets: c.targets,
+ Validate: opts.Validate,
+ }).Build(RootModulePath)
+ }
+
+ return nil, fmt.Errorf("unknown graph type: %s", typ)
+}
+
+// ShadowError returns any errors caught during a shadow operation.
+//
+// A shadow operation is an operation run in parallel to a real operation
+// that performs the same tasks using new logic on copied state. The results
+// are compared to ensure that the new logic works the same as the old logic.
+// The shadow never affects the real operation or return values.
+//
+// The results of the shadow operation are only available through this
+// function call after a real operation is complete.
+//
+// For API consumers of Context, you can safely ignore this function
+// completely if you have no interest in helping report experimental feature
+// errors to Terraform maintainers. Otherwise, please call this function
+// after every operation and report this to the user.
+//
+// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
+// the real state or result of a real operation. They are purely informational
+// to assist in future Terraform versions being more stable. Please message
+// this effectively to the end user.
+//
+// This must be called only when no other operation is running (refresh,
+// plan, etc.). The result can be used in parallel to any other operation
+// running.
+func (c *Context) ShadowError() error {
+ return c.shadowErr
+}
+
+// State returns a copy of the current state associated with this context.
+//
+// This cannot safely be called in parallel with any other Context function.
+func (c *Context) State() *State {
+ return c.state.DeepCopy()
+}
+
+// Interpolater returns an Interpolater built on a copy of the state
+// that can be used to test interpolation values.
+func (c *Context) Interpolater() *Interpolater {
+ var varLock sync.Mutex
+ var stateLock sync.RWMutex
+ return &Interpolater{
+ Operation: walkApply,
+ Meta: c.meta,
+ Module: c.module,
+ State: c.state.DeepCopy(),
+ StateLock: &stateLock,
+ VariableValues: c.variables,
+ VariableValuesLock: &varLock,
+ }
+}
+
+// Input asks for input to fill variables and provider configurations.
+// This modifies the configuration in-place, so asking for Input twice
+// may result in different UI output showing different current values.
+func (c *Context) Input(mode InputMode) error {
+ defer c.acquireRun("input")()
+
+ if mode&InputModeVar != 0 {
+ // Walk the variables first for the root module. We walk them in
+ // alphabetical order for UX reasons.
+ rootConf := c.module.Config()
+ names := make([]string, len(rootConf.Variables))
+ m := make(map[string]*config.Variable)
+ for i, v := range rootConf.Variables {
+ names[i] = v.Name
+ m[v.Name] = v
+ }
+ sort.Strings(names)
+ for _, n := range names {
+ // If we only care about unset variables, then if the variable
+ // is set, continue on.
+ if mode&InputModeVarUnset != 0 {
+ if _, ok := c.variables[n]; ok {
+ continue
+ }
+ }
+
+ var valueType config.VariableType
+
+ v := m[n]
+ switch valueType = v.Type(); valueType {
+ case config.VariableTypeUnknown:
+ continue
+ case config.VariableTypeMap:
+ // OK
+ case config.VariableTypeList:
+ // OK
+ case config.VariableTypeString:
+ // OK
+ default:
+ panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
+ }
+
+ // If the variable is not already set, and the variable defines a
+ // default, use that for the value.
+ if _, ok := c.variables[n]; !ok {
+ if v.Default != nil {
+ c.variables[n] = v.Default.(string)
+ continue
+ }
+ }
+
+ // this should only happen during tests
+ if c.uiInput == nil {
+ log.Println("[WARN] Content.uiInput is nil")
+ continue
+ }
+
+ // Ask the user for a value for this variable
+ var value string
+ retry := 0
+ for {
+ var err error
+ value, err = c.uiInput.Input(&InputOpts{
+ Id: fmt.Sprintf("var.%s", n),
+ Query: fmt.Sprintf("var.%s", n),
+ Description: v.Description,
+ })
+ if err != nil {
+ return fmt.Errorf(
+ "Error asking for %s: %s", n, err)
+ }
+
+ if value == "" && v.Required() {
+ // Redo if it is required, but abort if we keep getting
+ // blank entries
+ if retry > 2 {
+ return fmt.Errorf("missing required value for %q", n)
+ }
+ retry++
+ continue
+ }
+
+ break
+ }
+
+ // no value provided, so don't set the variable at all
+ if value == "" {
+ continue
+ }
+
+ decoded, err := parseVariableAsHCL(n, value, valueType)
+ if err != nil {
+ return err
+ }
+
+ if decoded != nil {
+ c.variables[n] = decoded
+ }
+ }
+ }
+
+ if mode&InputModeProvider != 0 {
+ // Build the graph
+ graph, err := c.Graph(GraphTypeInput, nil)
+ if err != nil {
+ return err
+ }
+
+ // Do the walk
+ if _, err := c.walk(graph, nil, walkInput); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Apply applies the changes represented by this context and returns
+// the resulting state.
+//
+// Even in the case an error is returned, the state may be returned and will
+// potentially be partially updated. In addition to returning the resulting
+// state, this context is updated with the latest state.
+//
+// If the state is required after an error, the caller should call
+// Context.State, rather than rely on the return value.
+//
+// TODO: Apply and Refresh should either always return a state, or rely on the
+// State() method. Currently the helper/resource testing framework relies
+// on the absence of a returned state to determine if Destroy can be
+// called, so that will need to be refactored before this can be changed.
+func (c *Context) Apply() (*State, error) {
+ defer c.acquireRun("apply")()
+
+ // Copy our own state
+ c.state = c.state.DeepCopy()
+
+ // Build the graph.
+ graph, err := c.Graph(GraphTypeApply, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Determine the operation
+ operation := walkApply
+ if c.destroy {
+ operation = walkDestroy
+ }
+
+ // Walk the graph
+ walker, err := c.walk(graph, graph, operation)
+ if len(walker.ValidationErrors) > 0 {
+ err = multierror.Append(err, walker.ValidationErrors...)
+ }
+
+ // Clean out any unused things
+ c.state.prune()
+
+ return c.state, err
+}
+
+// Plan generates an execution plan for the given context.
+//
+// The execution plan encapsulates the context and can be stored
+// in order to reinstantiate a context later for Apply.
+//
+// Plan also updates the diff of this context to be the diff generated
+// by the plan, so Apply can be called after.
+func (c *Context) Plan() (*Plan, error) {
+ defer c.acquireRun("plan")()
+
+ p := &Plan{
+ Module: c.module,
+ Vars: c.variables,
+ State: c.state,
+ Targets: c.targets,
+ }
+
+ var operation walkOperation
+ if c.destroy {
+ operation = walkPlanDestroy
+ } else {
+		// Set our state to something temporary so the plan can update a
+		// fake state and variable interpolation still works; afterward we
+		// restore the old state.
+ old := c.state
+ if old == nil {
+ c.state = &State{}
+ c.state.init()
+ } else {
+ c.state = old.DeepCopy()
+ }
+ defer func() {
+ c.state = old
+ }()
+
+ operation = walkPlan
+ }
+
+ // Setup our diff
+ c.diffLock.Lock()
+ c.diff = new(Diff)
+ c.diff.init()
+ c.diffLock.Unlock()
+
+ // Build the graph.
+ graphType := GraphTypePlan
+ if c.destroy {
+ graphType = GraphTypePlanDestroy
+ }
+ graph, err := c.Graph(graphType, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Do the walk
+ walker, err := c.walk(graph, graph, operation)
+ if err != nil {
+ return nil, err
+ }
+ p.Diff = c.diff
+
+ // If this is true, it means we're running unit tests. In this case,
+ // we perform a deep copy just to ensure that all context tests also
+ // test that a diff is copy-able. This will panic if it fails. This
+ // is enabled during unit tests.
+ //
+ // This should never be true during production usage, but even if it is,
+ // it can't do any real harm.
+ if contextTestDeepCopyOnPlan {
+ p.Diff.DeepCopy()
+ }
+
+ /*
+ // We don't do the reverification during the new destroy plan because
+ // it will use a different apply process.
+ if X_legacyGraph {
+ // Now that we have a diff, we can build the exact graph that Apply will use
+ // and catch any possible cycles during the Plan phase.
+ if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
+ return nil, err
+ }
+ }
+ */
+
+ var errs error
+ if len(walker.ValidationErrors) > 0 {
+ errs = multierror.Append(errs, walker.ValidationErrors...)
+ }
+ return p, errs
+}
+
+// Refresh goes through all the resources in the state and refreshes them
+// to their latest state. This will update the state that this context
+// works with, along with returning it.
+//
+// Even in the case an error is returned, the state may be returned and
+// will potentially be partially updated.
+func (c *Context) Refresh() (*State, error) {
+ defer c.acquireRun("refresh")()
+
+ // Copy our own state
+ c.state = c.state.DeepCopy()
+
+ // Build the graph.
+ graph, err := c.Graph(GraphTypeRefresh, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Do the walk
+ if _, err := c.walk(graph, graph, walkRefresh); err != nil {
+ return nil, err
+ }
+
+ // Clean out any unused things
+ c.state.prune()
+
+ return c.state, nil
+}
+
+// Stop stops the running task.
+//
+// Stop will block until the task completes.
+func (c *Context) Stop() {
+ log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence")
+
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ // If we're running, then stop
+ if c.runContextCancel != nil {
+ log.Printf("[WARN] terraform: run context exists, stopping")
+
+ // Tell the hook we want to stop
+ c.sh.Stop()
+
+ // Stop the context
+ c.runContextCancel()
+ c.runContextCancel = nil
+ }
+
+	// If a run is in progress, wait for it to finish before returning
+ if cond := c.runCond; cond != nil {
+ cond.Wait()
+ }
+
+ log.Printf("[WARN] terraform: stop complete")
+}
+
+// Validate validates the configuration and returns any warnings or errors.
+func (c *Context) Validate() ([]string, []error) {
+ defer c.acquireRun("validate")()
+
+ var errs error
+
+ // Validate the configuration itself
+ if err := c.module.Validate(); err != nil {
+ errs = multierror.Append(errs, err)
+ }
+
+ // This only needs to be done for the root module, since inter-module
+ // variables are validated in the module tree.
+ if config := c.module.Config(); config != nil {
+ // Validate the user variables
+ if err := smcUserVariables(config, c.variables); len(err) > 0 {
+ errs = multierror.Append(errs, err...)
+ }
+ }
+
+ // If we have errors at this point, the graphing has no chance,
+ // so just bail early.
+ if errs != nil {
+ return nil, []error{errs}
+ }
+
+ // Build the graph so we can walk it and run Validate on nodes.
+ // We also validate the graph generated here, but this graph doesn't
+ // necessarily match the graph that Plan will generate, so we'll validate the
+ // graph again later after Planning.
+ graph, err := c.Graph(GraphTypeValidate, nil)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ // Walk
+ walker, err := c.walk(graph, graph, walkValidate)
+ if err != nil {
+ return nil, multierror.Append(errs, err).Errors
+ }
+
+ // Return the result
+ rerrs := multierror.Append(errs, walker.ValidationErrors...)
+
+ sort.Strings(walker.ValidationWarnings)
+ sort.Slice(rerrs.Errors, func(i, j int) bool {
+ return rerrs.Errors[i].Error() < rerrs.Errors[j].Error()
+ })
+
+ return walker.ValidationWarnings, rerrs.Errors
+}
+
+// Module returns the module tree associated with this context.
+func (c *Context) Module() *module.Tree {
+ return c.module
+}
+
+// Variables will return the mapping of variables that were defined
+// for this Context. If Input was called, this mapping may be different
+// than what was given.
+func (c *Context) Variables() map[string]interface{} {
+ return c.variables
+}
+
+// SetVariable sets a variable after a context has already been built.
+func (c *Context) SetVariable(k string, v interface{}) {
+ c.variables[k] = v
+}
+
+func (c *Context) acquireRun(phase string) func() {
+ // With the run lock held, grab the context lock to make changes
+ // to the run context.
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ // Wait until we're no longer running
+ for c.runCond != nil {
+ c.runCond.Wait()
+ }
+
+ // Build our lock
+ c.runCond = sync.NewCond(&c.l)
+
+ // Setup debugging
+ dbug.SetPhase(phase)
+
+ // Create a new run context
+ c.runContext, c.runContextCancel = context.WithCancel(context.Background())
+
+ // Reset the stop hook so we're not stopped
+ c.sh.Reset()
+
+ // Reset the shadow errors
+ c.shadowErr = nil
+
+ return c.releaseRun
+}
+
+func (c *Context) releaseRun() {
+ // Grab the context lock so that we can make modifications to fields
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ // setting the phase to "INVALID" lets us easily detect if we have
+ // operations happening outside of a run, or we missed setting the proper
+ // phase
+ dbug.SetPhase("INVALID")
+
+ // End our run. We check if runContext is non-nil because it can be
+ // set to nil if it was cancelled via Stop()
+ if c.runContextCancel != nil {
+ c.runContextCancel()
+ }
+
+	// Wake everything waiting on our condition
+ cond := c.runCond
+ c.runCond = nil
+ cond.Broadcast()
+
+ // Unset the context
+ c.runContext = nil
+}
+
+func (c *Context) walk(
+ graph, shadow *Graph, operation walkOperation) (*ContextGraphWalker, error) {
+ // Keep track of the "real" context which is the context that does
+ // the real work: talking to real providers, modifying real state, etc.
+ realCtx := c
+
+ // If we don't want shadowing, remove it
+ if !experiment.Enabled(experiment.X_shadow) {
+ shadow = nil
+ }
+
+ // Just log this so we can see it in a debug log
+ if !c.shadow {
+ log.Printf("[WARN] terraform: shadow graph disabled")
+ shadow = nil
+ }
+
+ // If we have a shadow graph, walk that as well
+ var shadowCtx *Context
+ var shadowCloser Shadow
+ if shadow != nil {
+ // Build the shadow context. In the process, override the real context
+ // with the one that is wrapped so that the shadow context can verify
+ // the results of the real.
+ realCtx, shadowCtx, shadowCloser = newShadowContext(c)
+ }
+
+ log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
+
+ walker := &ContextGraphWalker{
+ Context: realCtx,
+ Operation: operation,
+ StopContext: c.runContext,
+ }
+
+ // Watch for a stop so we can call the provider Stop() API.
+ watchStop, watchWait := c.watchStop(walker)
+
+ // Walk the real graph; this blocks until it completes
+ realErr := graph.Walk(walker)
+
+ // Close the channel so the watcher stops, and wait for it to return.
+ close(watchStop)
+ <-watchWait
+
+ // If we have a shadow graph and we interrupted the real graph, then
+ // we just close the shadow and never verify it. It is non-trivial to
+ // recreate the exact execution state up until an interruption so this
+ // isn't supported with shadows at the moment.
+ if shadowCloser != nil && c.sh.Stopped() {
+ // Ignore the error result; there is nothing useful we can do with it
+ shadowCloser.CloseShadow()
+
+ // Set it to nil so we don't do anything
+ shadowCloser = nil
+ }
+
+ // If we have a shadow graph, wait for that to complete.
+ if shadowCloser != nil {
+ // Build the graph walker for the shadow. We also wrap this in
+ // a panicwrap so that panics are captured. For the shadow graph,
+ // we just want panics to be normal errors rather than to crash
+ // Terraform.
+ shadowWalker := GraphWalkerPanicwrap(&ContextGraphWalker{
+ Context: shadowCtx,
+ Operation: operation,
+ })
+
+ // Kick off the shadow walk. This will block on any operations
+ // on the real walk so it is fine to start first.
+ log.Printf("[INFO] Starting shadow graph walk: %s", operation.String())
+ shadowCh := make(chan error)
+ go func() {
+ shadowCh <- shadow.Walk(shadowWalker)
+ }()
+
+ // Notify the shadow that we're done
+ if err := shadowCloser.CloseShadow(); err != nil {
+ c.shadowErr = multierror.Append(c.shadowErr, err)
+ }
+
+ // Wait for the walk to end
+ log.Printf("[DEBUG] Waiting for shadow graph to complete...")
+ shadowWalkErr := <-shadowCh
+
+ // Get any shadow errors
+ if err := shadowCloser.ShadowError(); err != nil {
+ c.shadowErr = multierror.Append(c.shadowErr, err)
+ }
+
+ // Verify the contexts (compare)
+ if err := shadowContextVerify(realCtx, shadowCtx); err != nil {
+ c.shadowErr = multierror.Append(c.shadowErr, err)
+ }
+
+ // At this point, if we're supposed to fail on error, then
+ // we PANIC. Some tests just verify that there is an error,
+ // so simply appending it to realErr and returning could hide
+ // shadow problems.
+ //
+ // This must be done BEFORE appending shadowWalkErr since the
+ // shadowWalkErr may include expected errors.
+ //
+ // We only do this if we don't have a real error. In the case of
+ // a real error, we can't guarantee what nodes were and weren't
+ // traversed in parallel scenarios so we can't guarantee no
+ // shadow errors.
+ if c.shadowErr != nil && contextFailOnShadowError && realErr == nil {
+ panic(multierror.Prefix(c.shadowErr, "shadow graph:"))
+ }
+
+ // Now, if we have a walk error, we append that through
+ if shadowWalkErr != nil {
+ c.shadowErr = multierror.Append(c.shadowErr, shadowWalkErr)
+ }
+
+ if c.shadowErr == nil {
+ log.Printf("[INFO] Shadow graph success!")
+ } else {
+ log.Printf("[ERROR] Shadow graph error: %s", c.shadowErr)
+
+ // If we're supposed to fail on shadow errors, then report it
+ if contextFailOnShadowError {
+ realErr = multierror.Append(realErr, multierror.Prefix(
+ c.shadowErr, "shadow graph:"))
+ }
+ }
+ }
+
+ return walker, realErr
+}
+
+// watchStop immediately returns a `stop` and a `wait` chan after dispatching
+// the watchStop goroutine. This will watch the runContext for cancellation and
+// stop the providers accordingly. When the watch is no longer needed, the
+// `stop` chan should be closed before waiting on the `wait` chan.
+// The `wait` chan is important, because without synchronizing with the end of
+// the watchStop goroutine, the runContext may also be closed during the
+// select, incorrectly causing providers to be stopped. Even if the graph walk is done
+// at that point, stopping a provider permanently cancels its StopContext which
+// can cause later actions to fail.
+func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) {
+ stop := make(chan struct{})
+ wait := make(chan struct{})
+
+ // get the runContext cancellation channel now, because releaseRun will
+ // write to the runContext field.
+ done := c.runContext.Done()
+
+ go func() {
+ defer close(wait)
+ // Wait for a stop or completion
+ select {
+ case <-done:
+ // done means the context was canceled, so we need to try and stop
+ // providers.
+ case <-stop:
+ // our own stop channel was closed.
+ return
+ }
+
+ // If we're here, we're stopped, trigger the call.
+
+ {
+ // Copy the providers so that a misbehaved blocking Stop doesn't
+ // completely hang Terraform.
+ walker.providerLock.Lock()
+ ps := make([]ResourceProvider, 0, len(walker.providerCache))
+ for _, p := range walker.providerCache {
+ ps = append(ps, p)
+ }
+ defer walker.providerLock.Unlock()
+
+ for _, p := range ps {
+ // We ignore the error for now since there isn't any reasonable
+ // action to take if there is an error here, since the stop is still
+ // advisory: Terraform will exit once the graph node completes.
+ p.Stop()
+ }
+ }
+
+ {
+ // Call stop on all the provisioners
+ walker.provisionerLock.Lock()
+ ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache))
+ for _, p := range walker.provisionerCache {
+ ps = append(ps, p)
+ }
+ defer walker.provisionerLock.Unlock()
+
+ for _, p := range ps {
+ // We ignore the error for now since there isn't any reasonable
+ // action to take if there is an error here, since the stop is still
+ // advisory: Terraform will exit once the graph node completes.
+ p.Stop()
+ }
+ }
+ }()
+
+ return stop, wait
+}
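+
+// A minimal sketch (assumed, for illustration only) of the stop/wait contract
+// documented above: close `stop` once the walk finishes, then block on `wait`
+// so the goroutine cannot race a later releaseRun. Assumes an active run
+// (c.runContext set by acquireRun).
+func exampleWatchStopUsage(c *Context, g *Graph, walker *ContextGraphWalker) error {
+ watchStop, watchWait := c.watchStop(walker)
+ err := g.Walk(walker) // blocks until the graph walk completes
+ close(watchStop)
+ <-watchWait
+ return err
+}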
+
+// parseVariableAsHCL parses the value of a single variable as would have been specified
+// on the command line via -var or in an environment variable named TF_VAR_x, where x is
+// the name of the variable. In order to get around the restriction of HCL requiring a
+// top level object, we prepend a sentinel key, decode the user-specified value as its
+// value and pull the value back out of the resulting map.
+func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) {
+ // expecting a string so don't decode anything, just strip quotes
+ if targetType == config.VariableTypeString {
+ return strings.Trim(input, `"`), nil
+ }
+
+ // return empty types
+ if strings.TrimSpace(input) == "" {
+ switch targetType {
+ case config.VariableTypeList:
+ return []interface{}{}, nil
+ case config.VariableTypeMap:
+ return make(map[string]interface{}), nil
+ }
+ }
+
+ const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"
+ inputWithSentinel := fmt.Sprintf("%s = %s", sentinelValue, input)
+
+ var decoded map[string]interface{}
+ err := hcl.Decode(&decoded, inputWithSentinel)
+ if err != nil {
+ return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", name, input, err)
+ }
+
+ if len(decoded) != 1 {
+ return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. Only one value may be specified.", name, input)
+ }
+
+ parsedValue, ok := decoded[sentinelValue]
+ if !ok {
+ return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
+ }
+
+ switch targetType {
+ case config.VariableTypeList:
+ return parsedValue, nil
+ case config.VariableTypeMap:
+ if list, ok := parsedValue.([]map[string]interface{}); ok {
+ return list[0], nil
+ }
+
+ return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
+ default:
+ panic(fmt.Errorf("unknown type %s", targetType.Printable()))
+ }
+}
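+
+// A standalone sketch (assumed, for illustration only) of the sentinel-key
+// trick above: HCL requires a top-level assignment, so a value typed on the
+// command line, e.g. `["a", "b"]`, is wrapped in one before decoding.
+func exampleSentinelDecode() (interface{}, error) {
+ var decoded map[string]interface{}
+ if err := hcl.Decode(&decoded, `SENTINEL = ["a", "b"]`); err != nil {
+ return nil, err
+ }
+ // Expected to yield []interface{}{"a", "b"}
+ return decoded["SENTINEL"], nil
+}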
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_components.go b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
new file mode 100644
index 00000000..6f507445
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
@@ -0,0 +1,65 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// contextComponentFactory is the interface that Context uses
+// to initialize various components such as providers and provisioners.
+// This factory gets more information than the raw maps used to initialize
+// a Context. This information is used for debugging.
+type contextComponentFactory interface {
+ // ResourceProvider creates a new ResourceProvider with the given
+ // type. The "uid" is a unique identifier for this provider being
+ // initialized that can be used for internal tracking.
+ ResourceProvider(typ, uid string) (ResourceProvider, error)
+ ResourceProviders() []string
+
+ // ResourceProvisioner creates a new ResourceProvisioner with the
+ // given type. The "uid" is a unique identifier for this provisioner
+ // being initialized that can be used for internal tracking.
+ ResourceProvisioner(typ, uid string) (ResourceProvisioner, error)
+ ResourceProvisioners() []string
+}
+
+// basicComponentFactory just calls a factory from a map directly.
+type basicComponentFactory struct {
+ providers map[string]ResourceProviderFactory
+ provisioners map[string]ResourceProvisionerFactory
+}
+
+func (c *basicComponentFactory) ResourceProviders() []string {
+ result := make([]string, 0, len(c.providers))
+ for k := range c.providers {
+ result = append(result, k)
+ }
+
+ return result
+}
+
+func (c *basicComponentFactory) ResourceProvisioners() []string {
+ result := make([]string, 0, len(c.provisioners))
+ for k := range c.provisioners {
+ result = append(result, k)
+ }
+
+ return result
+}
+
+func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvider, error) {
+ f, ok := c.providers[typ]
+ if !ok {
+ return nil, fmt.Errorf("unknown provider %q", typ)
+ }
+
+ return f()
+}
+
+func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) {
+ f, ok := c.provisioners[typ]
+ if !ok {
+ return nil, fmt.Errorf("unknown provisioner %q", typ)
+ }
+
+ return f()
+}
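+
+// A minimal sketch (assumed, for illustration only) of wiring the factory:
+// providers are registered by type name and instantiated on demand.
+func exampleComponents() (ResourceProvider, error) {
+ components := &basicComponentFactory{
+ providers: map[string]ResourceProviderFactory{
+ "null": func() (ResourceProvider, error) {
+ // a real factory would construct and return a provider here
+ return nil, nil
+ },
+ },
+ }
+ return components.ResourceProvider("null", "uid-1")
+}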
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
new file mode 100644
index 00000000..084f0105
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
@@ -0,0 +1,32 @@
+package terraform
+
+//go:generate stringer -type=GraphType context_graph_type.go
+
+// GraphType is an enum of the type of graph to create with a Context.
+// The values of the constants may change so they shouldn't be depended on;
+// always use the constant name.
+type GraphType byte
+
+const (
+ GraphTypeInvalid GraphType = 0
+ GraphTypeLegacy GraphType = iota
+ GraphTypeRefresh
+ GraphTypePlan
+ GraphTypePlanDestroy
+ GraphTypeApply
+ GraphTypeInput
+ GraphTypeValidate
+)
+
+// GraphTypeMap is a mapping of human-readable string to GraphType. This
+// is useful to use as the mechanism for human input for configurable
+// graph types.
+var GraphTypeMap = map[string]GraphType{
+ "apply": GraphTypeApply,
+ "input": GraphTypeInput,
+ "plan": GraphTypePlan,
+ "plan-destroy": GraphTypePlanDestroy,
+ "refresh": GraphTypeRefresh,
+ "legacy": GraphTypeLegacy,
+ "validate": GraphTypeValidate,
+}
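+
+// A small sketch (assumed, for illustration only) of resolving user input
+// through GraphTypeMap, falling back to GraphTypeInvalid for unknown names.
+func exampleGraphType(name string) GraphType {
+ if t, ok := GraphTypeMap[name]; ok {
+ return t
+ }
+ return GraphTypeInvalid
+}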
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
new file mode 100644
index 00000000..f1d57760
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
@@ -0,0 +1,77 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// ImportOpts are used as the configuration for Import.
+type ImportOpts struct {
+ // Targets are the targets to import
+ Targets []*ImportTarget
+
+ // Module is optional, and specifies a config module that is loaded
+ // into the graph and evaluated. The use case for this is to provide
+ // provider configuration.
+ Module *module.Tree
+}
+
+// ImportTarget is a single resource to import.
+type ImportTarget struct {
+ // Addr is the full resource address of the resource to import.
+ // Example: "module.foo.aws_instance.bar"
+ Addr string
+
+ // ID is the ID of the resource to import. This is resource-specific.
+ ID string
+
+ // Provider is the name of the provider to use for this import, if set.
+ Provider string
+}
+
+// Import takes already-created external resources and brings them
+// under Terraform management. Import requires the exact type, name, and ID
+// of the resources to import.
+//
+// This operation is idempotent. If the requested resource is already
+// imported, no changes are made to the state.
+//
+// Further, this operation also gracefully handles partial state. If during
+// an import there is a failure, all previously imported resources remain
+// imported.
+func (c *Context) Import(opts *ImportOpts) (*State, error) {
+ // Hold a lock since we can modify our own state here
+ defer c.acquireRun("import")()
+
+ // Copy our own state
+ c.state = c.state.DeepCopy()
+
+ // If no module is given, default to the module configured with
+ // the Context.
+ module := opts.Module
+ if module == nil {
+ module = c.module
+ }
+
+ // Initialize our graph builder
+ builder := &ImportGraphBuilder{
+ ImportTargets: opts.Targets,
+ Module: module,
+ Providers: c.components.ResourceProviders(),
+ }
+
+ // Build the graph!
+ graph, err := builder.Build(RootModulePath)
+ if err != nil {
+ return c.state, err
+ }
+
+ // Walk it
+ if _, err := c.walk(graph, nil, walkImport); err != nil {
+ return c.state, err
+ }
+
+ // Clean the state
+ c.state.prune()
+
+ return c.state, nil
+}
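+
+// A minimal sketch (assumed, for illustration only) of calling Import with a
+// single target; the address and ID below are placeholders.
+func exampleImport(c *Context) (*State, error) {
+ return c.Import(&ImportOpts{
+ Targets: []*ImportTarget{
+ {Addr: "aws_instance.example", ID: "i-0123456789abcdef0"},
+ },
+ })
+}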
diff --git a/vendor/github.com/hashicorp/terraform/terraform/debug.go b/vendor/github.com/hashicorp/terraform/terraform/debug.go
new file mode 100644
index 00000000..265339f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/debug.go
@@ -0,0 +1,523 @@
+package terraform
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+)
+
+// dbug is the global handler for writing the debug archive. All methods
+// are safe to call concurrently. Setting dbug to nil will disable writing
+var dbug *debugInfo
+
+// SetDebugInfo initializes the debug handler with a backing file in the
+// provided directory. This must be called before any other terraform package
+// operations or not at all. Once this is called, CloseDebugInfo should be
+// called before program exit.
+func SetDebugInfo(path string) error {
+ if os.Getenv("TF_DEBUG") == "" {
+ return nil
+ }
+
+ di, err := newDebugInfoFile(path)
+ if err != nil {
+ return err
+ }
+
+ dbug = di
+ return nil
+}
+
+// CloseDebugInfo is the exported interface to Close the debug info handler.
+// The debug handler needs to be closed before program exit, so we export this
+// function to be deferred in the appropriate entrypoint for our executable.
+func CloseDebugInfo() error {
+ return dbug.Close()
+}
+
+// newDebugInfoFile initializes the global debug handler with a backing file in
+// the provided directory.
+func newDebugInfoFile(dir string) (*debugInfo, error) {
+ err := os.MkdirAll(dir, 0755)
+ if err != nil {
+ return nil, err
+ }
+
+ // FIXME: not guaranteed unique, but good enough for now
+ name := fmt.Sprintf("debug-%s", time.Now().Format("2006-01-02-15-04-05.999999999"))
+ archivePath := filepath.Join(dir, name+".tar.gz")
+
+ f, err := os.OpenFile(archivePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+ if err != nil {
+ return nil, err
+ }
+ return newDebugInfo(name, f)
+}
+
+// newDebugInfo initializes the global debug handler.
+func newDebugInfo(name string, w io.Writer) (*debugInfo, error) {
+ gz := gzip.NewWriter(w)
+
+ d := &debugInfo{
+ name: name,
+ w: w,
+ gz: gz,
+ tar: tar.NewWriter(gz),
+ }
+
+ // create the subdirs we need
+ topHdr := &tar.Header{
+ Name: name,
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ }
+ graphsHdr := &tar.Header{
+ Name: name + "/graphs",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ }
+ err := d.tar.WriteHeader(topHdr)
+ // if the first errors, the second will too
+ err = d.tar.WriteHeader(graphsHdr)
+ if err != nil {
+ return nil, err
+ }
+
+ return d, nil
+}
+
+// debugInfo provides various methods for writing debug information to a
+// central archive. The debugInfo struct should be initialized once before any
+// output is written, and Close should be called before program exit. All
+// exported methods on debugInfo will be safe for concurrent use. The exported
+// methods are also all safe to call on a nil pointer, so that there is no need
+// for conditional blocks before writing debug information.
+//
+// Each write operation done by the debugInfo will flush the gzip.Writer and
+// tar.Writer, and call Sync() or Flush() on the output writer as needed. This
+// ensures that as much data as possible is written to storage in the event of
+// a crash. The append format of the tar file, and the stream format of the
+// gzip writer allow easy recovery of the data in the event that the debugInfo
+// is not closed before program exit.
+type debugInfo struct {
+ sync.Mutex
+
+ // archive root directory name
+ name string
+
+ // current operation phase
+ phase string
+
+ // step is a monotonic counter for recording the order of operations
+ step int
+
+ // flag to protect Close()
+ closed bool
+
+ // the debug log output is in a tar.gz format, written to the io.Writer w
+ w io.Writer
+ gz *gzip.Writer
+ tar *tar.Writer
+}
+
+// SetPhase sets the name of the current operational phase in the debug handler. Each file
+// in the archive will contain the name of the phase in which it was created,
+// i.e. "input", "apply", "plan", "refresh", "validate"
+func (d *debugInfo) SetPhase(phase string) {
+ if d == nil {
+ return
+ }
+ d.Lock()
+ defer d.Unlock()
+
+ d.phase = phase
+}
+
+// Close finalizes the data in storage. This closes the
+// tar.Writer, the gzip.Writer, and if the output writer is an io.Closer, it is
+// also closed.
+func (d *debugInfo) Close() error {
+ if d == nil {
+ return nil
+ }
+
+ d.Lock()
+ defer d.Unlock()
+
+ if d.closed {
+ return nil
+ }
+ d.closed = true
+
+ d.tar.Close()
+ d.gz.Close()
+
+ if c, ok := d.w.(io.Closer); ok {
+ return c.Close()
+ }
+ return nil
+}
+
+// debugBuffer is an io.WriteCloser that will write itself to the debug
+// archive when closed.
+type debugBuffer struct {
+ debugInfo *debugInfo
+ name string
+ buf bytes.Buffer
+}
+
+func (b *debugBuffer) Write(d []byte) (int, error) {
+ return b.buf.Write(d)
+}
+
+func (b *debugBuffer) Close() error {
+ return b.debugInfo.WriteFile(b.name, b.buf.Bytes())
+}
+
+// package ioutil only provides a no-op ReadCloser, so define a WriteCloser here
+type nopWriteCloser struct{}
+
+func (nopWriteCloser) Write([]byte) (int, error) { return 0, nil }
+func (nopWriteCloser) Close() error { return nil }
+
+// NewFileWriter returns an io.WriteCloser whose contents are buffered and written to
+// the debug archive when closed.
+func (d *debugInfo) NewFileWriter(name string) io.WriteCloser {
+ if d == nil {
+ return nopWriteCloser{}
+ }
+
+ return &debugBuffer{
+ debugInfo: d,
+ name: name,
+ }
+}
+
+type syncer interface {
+ Sync() error
+}
+
+type flusher interface {
+ Flush() error
+}
+
+// Flush the tar.Writer and the gzip.Writer. Flush() or Sync() will be called
+// on the output writer if they are available.
+func (d *debugInfo) flush() {
+ d.tar.Flush()
+ d.gz.Flush()
+
+ if f, ok := d.w.(flusher); ok {
+ f.Flush()
+ }
+
+ if s, ok := d.w.(syncer); ok {
+ s.Sync()
+ }
+}
+
+// WriteFile writes data as a single file to the debug archive.
+func (d *debugInfo) WriteFile(name string, data []byte) error {
+ if d == nil {
+ return nil
+ }
+
+ d.Lock()
+ defer d.Unlock()
+ return d.writeFile(name, data)
+}
+
+func (d *debugInfo) writeFile(name string, data []byte) error {
+ defer d.flush()
+ path := fmt.Sprintf("%s/%d-%s-%s", d.name, d.step, d.phase, name)
+ d.step++
+
+ hdr := &tar.Header{
+ Name: path,
+ Mode: 0644,
+ Size: int64(len(data)),
+ }
+ err := d.tar.WriteHeader(hdr)
+ if err != nil {
+ return err
+ }
+
+ _, err = d.tar.Write(data)
+ return err
+}
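+
+// A short sketch (assumed, for illustration only): buffered writers land in
+// the archive as "<name>/<step>-<phase>-<file>", per writeFile above.
+func exampleDebugWrite(d *debugInfo) error {
+ w := d.NewFileWriter("graph.dot")
+ if _, err := w.Write([]byte("digraph {}")); err != nil {
+ return err
+ }
+ // Close flushes the buffer into the tar.gz archive
+ return w.Close()
+}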
+
+// DebugHook implements all methods of the terraform.Hook interface, and writes
+// the arguments to a file in the archive. When a suitable format for the
+// argument isn't available, the argument is encoded using json.Marshal. If the
+// debug handler is nil, all DebugHook methods are noop, so no time is spent in
+// marshaling the data structures.
+type DebugHook struct{}
+
+func (*DebugHook) PreApply(ii *InstanceInfo, is *InstanceState, id *InstanceDiff) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String() + "\n")
+ }
+
+ idCopy, err := id.Copy()
+ if err != nil {
+ return HookActionContinue, err
+ }
+ js, err := json.MarshalIndent(idCopy, "", " ")
+ if err != nil {
+ return HookActionContinue, err
+ }
+ buf.Write(js)
+
+ dbug.WriteFile("hook-PreApply", buf.Bytes())
+
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostApply(ii *InstanceInfo, is *InstanceState, err error) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String() + "\n")
+ }
+
+ if err != nil {
+ buf.WriteString(err.Error())
+ }
+
+ dbug.WriteFile("hook-PostApply", buf.Bytes())
+
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PreDiff(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String())
+ buf.WriteString("\n")
+ }
+ dbug.WriteFile("hook-PreDiff", buf.Bytes())
+
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostDiff(ii *InstanceInfo, id *InstanceDiff) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ idCopy, err := id.Copy()
+ if err != nil {
+ return HookActionContinue, err
+ }
+ js, err := json.MarshalIndent(idCopy, "", " ")
+ if err != nil {
+ return HookActionContinue, err
+ }
+ buf.Write(js)
+
+ dbug.WriteFile("hook-PostDiff", buf.Bytes())
+
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PreProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String())
+ buf.WriteString("\n")
+ }
+ dbug.WriteFile("hook-PreProvisionResource", buf.Bytes())
+
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId())
+ buf.WriteString("\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String())
+ buf.WriteString("\n")
+ }
+ dbug.WriteFile("hook-PostProvisionResource", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PreProvision(ii *InstanceInfo, s string) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId())
+ buf.WriteString("\n")
+ }
+ buf.WriteString(s + "\n")
+
+ dbug.WriteFile("hook-PreProvision", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostProvision(ii *InstanceInfo, s string, err error) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+ buf.WriteString(s + "\n")
+
+ dbug.WriteFile("hook-PostProvision", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) ProvisionOutput(ii *InstanceInfo, s1 string, s2 string) {
+ if dbug == nil {
+ return
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId())
+ buf.WriteString("\n")
+ }
+ buf.WriteString(s1 + "\n")
+ buf.WriteString(s2 + "\n")
+
+ dbug.WriteFile("hook-ProvisionOutput", buf.Bytes())
+}
+
+func (*DebugHook) PreRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String())
+ buf.WriteString("\n")
+ }
+ dbug.WriteFile("hook-PreRefresh", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId())
+ buf.WriteString("\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String())
+ buf.WriteString("\n")
+ }
+ dbug.WriteFile("hook-PostRefresh", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PreImportState(ii *InstanceInfo, s string) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId())
+ buf.WriteString("\n")
+ }
+ buf.WriteString(s + "\n")
+
+ dbug.WriteFile("hook-PreImportState", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostImportState(ii *InstanceInfo, iss []*InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ for _, is := range iss {
+ if is != nil {
+ buf.WriteString(is.String() + "\n")
+ }
+ }
+ dbug.WriteFile("hook-PostImportState", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+// skip logging this for now, since it could be huge
+func (*DebugHook) PostStateUpdate(*State) (HookAction, error) {
+ return HookActionContinue, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go
new file mode 100644
index 00000000..a9fae6c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go
@@ -0,0 +1,866 @@
+package terraform
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/mitchellh/copystructure"
+)
+
+// DiffChangeType is an enum with the kind of changes a diff has planned.
+type DiffChangeType byte
+
+const (
+ DiffInvalid DiffChangeType = iota
+ DiffNone
+ DiffCreate
+ DiffUpdate
+ DiffDestroy
+ DiffDestroyCreate
+)
+
+// multiVal matches the index key to a flatmapped set, list or map
+var multiVal = regexp.MustCompile(`\.(#|%)$`)
+
+// Diff tracks the changes that are necessary to apply a configuration
+// to an existing infrastructure.
+type Diff struct {
+ // Modules contains all the modules that have a diff
+ Modules []*ModuleDiff
+}
+
+// Prune cleans out unused structures in the diff without affecting
+// the behavior of the diff at all.
+//
+// This is not safe to call concurrently. This is safe to call on a
+// nil Diff.
+func (d *Diff) Prune() {
+ if d == nil {
+ return
+ }
+
+ // Prune all empty modules
+ newModules := make([]*ModuleDiff, 0, len(d.Modules))
+ for _, m := range d.Modules {
+ // If the module isn't empty, we keep it
+ if !m.Empty() {
+ newModules = append(newModules, m)
+ }
+ }
+ if len(newModules) == 0 {
+ newModules = nil
+ }
+ d.Modules = newModules
+}
+
+// AddModule adds the module with the given path to the diff.
+//
+// This should be the preferred method to add module diffs since it
+// allows us to optimize lookups later as well as control sorting.
+func (d *Diff) AddModule(path []string) *ModuleDiff {
+ m := &ModuleDiff{Path: path}
+ m.init()
+ d.Modules = append(d.Modules, m)
+ return m
+}
+
+// ModuleByPath is used to lookup the module diff for the given path.
+// This should be the preferred lookup mechanism as it allows for future
+// lookup optimizations.
+func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
+ if d == nil {
+ return nil
+ }
+ for _, mod := range d.Modules {
+ if mod.Path == nil {
+ panic("missing module path")
+ }
+ if reflect.DeepEqual(mod.Path, path) {
+ return mod
+ }
+ }
+ return nil
+}
+
+// RootModule returns the ModuleDiff for the root module
+func (d *Diff) RootModule() *ModuleDiff {
+ root := d.ModuleByPath(rootModulePath)
+ if root == nil {
+ panic("missing root module")
+ }
+ return root
+}
+
+// Empty returns true if the diff has no changes.
+func (d *Diff) Empty() bool {
+ if d == nil {
+ return true
+ }
+
+ for _, m := range d.Modules {
+ if !m.Empty() {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Equal compares two diffs for exact equality.
+//
+// This is different from the Same comparison that is supported which
+// checks for operation equality taking into account computed values. Equal
+// instead checks for exact equality.
+func (d *Diff) Equal(d2 *Diff) bool {
+ // If one is nil, they must both be nil
+ if d == nil || d2 == nil {
+ return d == d2
+ }
+
+ // Sort the modules
+ sort.Sort(moduleDiffSort(d.Modules))
+ sort.Sort(moduleDiffSort(d2.Modules))
+
+ // Copy since we have to modify the module destroy flag to false so
+ // we don't compare that. TODO: delete this when we get rid of the
+ // destroy flag on modules.
+ dCopy := d.DeepCopy()
+ d2Copy := d2.DeepCopy()
+ for _, m := range dCopy.Modules {
+ m.Destroy = false
+ }
+ for _, m := range d2Copy.Modules {
+ m.Destroy = false
+ }
+
+ // Use DeepEqual
+ return reflect.DeepEqual(dCopy, d2Copy)
+}
+
+// DeepCopy performs a deep copy of all parts of the Diff, making the
+// resulting Diff safe to use without modifying this one.
+func (d *Diff) DeepCopy() *Diff {
+ copy, err := copystructure.Config{Lock: true}.Copy(d)
+ if err != nil {
+ panic(err)
+ }
+
+ return copy.(*Diff)
+}
+
+func (d *Diff) String() string {
+ var buf bytes.Buffer
+
+ keys := make([]string, 0, len(d.Modules))
+ lookup := make(map[string]*ModuleDiff)
+ for _, m := range d.Modules {
+ key := fmt.Sprintf("module.%s", strings.Join(m.Path[1:], "."))
+ keys = append(keys, key)
+ lookup[key] = m
+ }
+ sort.Strings(keys)
+
+ for _, key := range keys {
+ m := lookup[key]
+ mStr := m.String()
+
+ // If we're the root module, we just write the output directly.
+ if reflect.DeepEqual(m.Path, rootModulePath) {
+ buf.WriteString(mStr + "\n")
+ continue
+ }
+
+ buf.WriteString(fmt.Sprintf("%s:\n", key))
+
+ s := bufio.NewScanner(strings.NewReader(mStr))
+ for s.Scan() {
+ buf.WriteString(fmt.Sprintf(" %s\n", s.Text()))
+ }
+ }
+
+ return strings.TrimSpace(buf.String())
+}
+
+func (d *Diff) init() {
+ if d.Modules == nil {
+ rootDiff := &ModuleDiff{Path: rootModulePath}
+ d.Modules = []*ModuleDiff{rootDiff}
+ }
+ for _, m := range d.Modules {
+ m.init()
+ }
+}
+
+// ModuleDiff tracks the differences between resources to apply within
+// a single module.
+type ModuleDiff struct {
+ Path []string
+ Resources map[string]*InstanceDiff
+ Destroy bool // Set only by the destroy plan
+}
+
+func (d *ModuleDiff) init() {
+ if d.Resources == nil {
+ d.Resources = make(map[string]*InstanceDiff)
+ }
+ for _, r := range d.Resources {
+ r.init()
+ }
+}
+
+// ChangeType returns the type of changes that the diff for this
+// module includes.
+//
+// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or
+// DiffCreate. If an instance within the module has a DiffDestroyCreate
+// then this will register as a DiffCreate for a module.
+func (d *ModuleDiff) ChangeType() DiffChangeType {
+ result := DiffNone
+ for _, r := range d.Resources {
+ change := r.ChangeType()
+ switch change {
+ case DiffCreate, DiffDestroy:
+ if result == DiffNone {
+ result = change
+ }
+ case DiffDestroyCreate, DiffUpdate:
+ result = DiffUpdate
+ }
+ }
+
+ return result
+}
+
+// Empty returns true if the diff has no changes within this module.
+func (d *ModuleDiff) Empty() bool {
+ if d.Destroy {
+ return false
+ }
+
+ if len(d.Resources) == 0 {
+ return true
+ }
+
+ for _, rd := range d.Resources {
+ if !rd.Empty() {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Instances returns the instance diffs for the id given. This can return
+// multiple instance diffs if there are counts within the resource.
+func (d *ModuleDiff) Instances(id string) []*InstanceDiff {
+ var result []*InstanceDiff
+ for k, diff := range d.Resources {
+ if k == id || strings.HasPrefix(k, id+".") {
+ if !diff.Empty() {
+ result = append(result, diff)
+ }
+ }
+ }
+
+ return result
+}
+
+// IsRoot says whether or not this module diff is for the root module.
+func (d *ModuleDiff) IsRoot() bool {
+ return reflect.DeepEqual(d.Path, rootModulePath)
+}
+
+// String outputs the diff in a long but command-line friendly output
+// format that users can read to quickly inspect a diff.
+func (d *ModuleDiff) String() string {
+ var buf bytes.Buffer
+
+ names := make([]string, 0, len(d.Resources))
+ for name := range d.Resources {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+
+ for _, name := range names {
+ rdiff := d.Resources[name]
+
+ crud := "UPDATE"
+ switch {
+ case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()):
+ crud = "DESTROY/CREATE"
+ case rdiff.GetDestroy() || rdiff.GetDestroyDeposed():
+ crud = "DESTROY"
+ case rdiff.RequiresNew():
+ crud = "CREATE"
+ }
+
+ extra := ""
+ if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() {
+ extra = " (deposed only)"
+ }
+
+ buf.WriteString(fmt.Sprintf(
+ "%s: %s%s\n",
+ crud,
+ name,
+ extra))
+
+ keyLen := 0
+ rdiffAttrs := rdiff.CopyAttributes()
+ keys := make([]string, 0, len(rdiffAttrs))
+ for key := range rdiffAttrs {
+ if key == "id" {
+ continue
+ }
+
+ keys = append(keys, key)
+ if len(key) > keyLen {
+ keyLen = len(key)
+ }
+ }
+ sort.Strings(keys)
+
+ for _, attrK := range keys {
+ attrDiff, _ := rdiff.GetAttribute(attrK)
+
+ v := attrDiff.New
+ u := attrDiff.Old
+ if attrDiff.NewComputed {
+ v = "<computed>"
+ }
+
+ if attrDiff.Sensitive {
+ u = "<sensitive>"
+ v = "<sensitive>"
+ }
+
+ updateMsg := ""
+ if attrDiff.RequiresNew {
+ updateMsg = " (forces new resource)"
+ } else if attrDiff.Sensitive {
+ updateMsg = " (attribute changed)"
+ }
+
+ buf.WriteString(fmt.Sprintf(
+ " %s:%s %#v => %#v%s\n",
+ attrK,
+ strings.Repeat(" ", keyLen-len(attrK)),
+ u,
+ v,
+ updateMsg))
+ }
+ }
+
+ return buf.String()
+}
+
+// InstanceDiff is the diff of a resource from some state to another.
+type InstanceDiff struct {
+ mu sync.Mutex
+ Attributes map[string]*ResourceAttrDiff
+ Destroy bool
+ DestroyDeposed bool
+ DestroyTainted bool
+
+ // Meta is a simple K/V map that is stored in a diff and persisted to
+ // plans but otherwise is completely ignored by Terraform core. It is
+ // meant to be used for additional data a resource may want to pass through.
+ // The value here must only contain Go primitives and collections.
+ Meta map[string]interface{}
+}
+
+func (d *InstanceDiff) Lock() { d.mu.Lock() }
+func (d *InstanceDiff) Unlock() { d.mu.Unlock() }
+
+// ResourceAttrDiff is the diff of a single attribute of a resource.
+type ResourceAttrDiff struct {
+ Old string // Old Value
+ New string // New Value
+ NewComputed bool // True if new value is computed (unknown currently)
+ NewRemoved bool // True if this attribute is being removed
+ NewExtra interface{} // Extra information for the provider
+ RequiresNew bool // True if change requires new resource
+ Sensitive bool // True if the data should not be displayed in UI output
+ Type DiffAttrType
+}
+
+// Empty returns true if the diff for this attr is neutral
+func (d *ResourceAttrDiff) Empty() bool {
+ return d.Old == d.New && !d.NewComputed && !d.NewRemoved
+}
+
+func (d *ResourceAttrDiff) GoString() string {
+ return fmt.Sprintf("*%#v", *d)
+}
+
+// DiffAttrType is an enum type that says whether a resource attribute
+// diff is an input attribute (comes from the configuration) or an
+// output attribute (comes as a result of applying the configuration). An
+// example input would be "ami" for AWS and an example output would be
+// "private_ip".
+type DiffAttrType byte
+
+const (
+ DiffAttrUnknown DiffAttrType = iota
+ DiffAttrInput
+ DiffAttrOutput
+)
+
+func (d *InstanceDiff) init() {
+ if d.Attributes == nil {
+ d.Attributes = make(map[string]*ResourceAttrDiff)
+ }
+}
+
+func NewInstanceDiff() *InstanceDiff {
+ return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)}
+}
+
+func (d *InstanceDiff) Copy() (*InstanceDiff, error) {
+ if d == nil {
+ return nil, nil
+ }
+
+ dCopy, err := copystructure.Config{Lock: true}.Copy(d)
+ if err != nil {
+ return nil, err
+ }
+
+ return dCopy.(*InstanceDiff), nil
+}
+
+// ChangeType returns the DiffChangeType represented by the diff
+// for this single instance.
+func (d *InstanceDiff) ChangeType() DiffChangeType {
+ if d.Empty() {
+ return DiffNone
+ }
+
+ if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) {
+ return DiffDestroyCreate
+ }
+
+ if d.GetDestroy() || d.GetDestroyDeposed() {
+ return DiffDestroy
+ }
+
+ if d.RequiresNew() {
+ return DiffCreate
+ }
+
+ return DiffUpdate
+}
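+
+// A small sketch (assumed, for illustration only) of how the flags combine:
+// a RequiresNew attribute plus Destroy yields DiffDestroyCreate.
+func exampleChangeType() DiffChangeType {
+ d := NewInstanceDiff()
+ d.SetAttribute("ami", &ResourceAttrDiff{Old: "ami-1", New: "ami-2", RequiresNew: true})
+ d.SetDestroy(true)
+ // RequiresNew && Destroy => DiffDestroyCreate
+ return d.ChangeType()
+}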
+
+// Empty returns true if this diff encapsulates no changes.
+func (d *InstanceDiff) Empty() bool {
+ if d == nil {
+ return true
+ }
+
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ return !d.Destroy &&
+ !d.DestroyTainted &&
+ !d.DestroyDeposed &&
+ len(d.Attributes) == 0
+}
+
+// Equal compares two diffs for exact equality.
+//
+// This is different from the Same comparison that is supported which
+// checks for operation equality taking into account computed values. Equal
+// instead checks for exact equality.
+func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool {
+ // If one is nil, they must both be nil
+ if d == nil || d2 == nil {
+ return d == d2
+ }
+
+ // Use DeepEqual
+ return reflect.DeepEqual(d, d2)
+}
+
+// DeepCopy performs a deep copy of all parts of the InstanceDiff
+func (d *InstanceDiff) DeepCopy() *InstanceDiff {
+ copy, err := copystructure.Config{Lock: true}.Copy(d)
+ if err != nil {
+ panic(err)
+ }
+
+ return copy.(*InstanceDiff)
+}
+
+func (d *InstanceDiff) GoString() string {
+ return fmt.Sprintf("*%#v", InstanceDiff{
+ Attributes: d.Attributes,
+ Destroy: d.Destroy,
+ DestroyTainted: d.DestroyTainted,
+ DestroyDeposed: d.DestroyDeposed,
+ })
+}
+
+// RequiresNew returns true if the diff requires the creation of a new
+// resource (implying the destruction of the old).
+func (d *InstanceDiff) RequiresNew() bool {
+ if d == nil {
+ return false
+ }
+
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ return d.requiresNew()
+}
+
+func (d *InstanceDiff) requiresNew() bool {
+ if d == nil {
+ return false
+ }
+
+ if d.DestroyTainted {
+ return true
+ }
+
+ for _, rd := range d.Attributes {
+ if rd != nil && rd.RequiresNew {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (d *InstanceDiff) GetDestroyDeposed() bool {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ return d.DestroyDeposed
+}
+
+func (d *InstanceDiff) SetDestroyDeposed(b bool) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.DestroyDeposed = b
+}
+
+// These methods are properly locked, for use outside other InstanceDiff
+// methods but everywhere else within the terraform package.
+// TODO refactor the locking scheme
+func (d *InstanceDiff) SetTainted(b bool) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.DestroyTainted = b
+}
+
+func (d *InstanceDiff) GetDestroyTainted() bool {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ return d.DestroyTainted
+}
+
+func (d *InstanceDiff) SetDestroy(b bool) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.Destroy = b
+}
+
+func (d *InstanceDiff) GetDestroy() bool {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ return d.Destroy
+}
+
+func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.Attributes[key] = attr
+}
+
+func (d *InstanceDiff) DelAttribute(key string) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ delete(d.Attributes, key)
+}
+
+func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ attr, ok := d.Attributes[key]
+ return attr, ok
+}
+
+func (d *InstanceDiff) GetAttributesLen() int {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ return len(d.Attributes)
+}
+
+// Safely copies the Attributes map
+func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ attrs := make(map[string]*ResourceAttrDiff)
+ for k, v := range d.Attributes {
+ attrs[k] = v
+ }
+
+ return attrs
+}
+
+// Same checks whether or not two InstanceDiffs are the "same". When
+// we say "same", it is not necessarily exactly equal. Instead, it is
+// just checking that the same attributes are changing, a destroy
+// isn't suddenly happening, etc.
+func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
+ // we can safely compare the pointers without a lock
+ switch {
+ case d == nil && d2 == nil:
+ return true, ""
+ case d == nil || d2 == nil:
+ return false, "one nil"
+ case d == d2:
+ return true, ""
+ }
+
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ // If we're going from requiring new to NOT requiring new, then we have
+ // to see if all required news were computed. If so, it is allowed since
+ // computed may also mean "same value and therefore not new".
+ oldNew := d.requiresNew()
+ newNew := d2.RequiresNew()
+ if oldNew && !newNew {
+ oldNew = false
+
+ // This section builds a list of ignorable attributes for requiresNew
+ // by removing off any elements of collections going to zero elements.
+ // For collections going to zero, they may not exist at all in the
+ // new diff (and hence RequiresNew == false).
+ ignoreAttrs := make(map[string]struct{})
+ for k, diffOld := range d.Attributes {
+ if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") {
+ continue
+ }
+
+ // This case is in here as a protection measure. The bug that this
+ // code originally fixed (GH-11349) didn't have to deal with computed
+ // so I'm not 100% sure what the correct behavior is. Best to leave
+ // the old behavior.
+ if diffOld.NewComputed {
+ continue
+ }
+
+ // We're looking for the case a map goes to exactly 0.
+ if diffOld.New != "0" {
+ continue
+ }
+
+ // Found it! Ignore all of these. The prefix here is stripping
+ // off the "%" so it is just "k."
+ prefix := k[:len(k)-1]
+ for k2 := range d.Attributes {
+ if strings.HasPrefix(k2, prefix) {
+ ignoreAttrs[k2] = struct{}{}
+ }
+ }
+ }
+
+ for k, rd := range d.Attributes {
+ if _, ok := ignoreAttrs[k]; ok {
+ continue
+ }
+
+ // If the field requires new and is NOT computed, then what
+ // we have is a diff mismatch for sure. We set that the old
+ // diff does REQUIRE a ForceNew.
+ if rd != nil && rd.RequiresNew && !rd.NewComputed {
+ oldNew = true
+ break
+ }
+ }
+ }
+
+ if oldNew != newNew {
+ return false, fmt.Sprintf(
+ "diff RequiresNew; old: %t, new: %t", oldNew, newNew)
+ }
+
+ // Verify that destroy matches. The second boolean here allows us to
+ // have mismatching Destroy if we're moving from RequiresNew true
+ // to false above. Therefore, the second boolean will only pass if
+ // we're moving from Destroy: true to false as well.
+ if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew {
+ return false, fmt.Sprintf(
+ "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy())
+ }
+
+ // Go through the old diff and make sure the new diff has all the
+ // same attributes. To start, build up the check map to be all the keys.
+ checkOld := make(map[string]struct{})
+ checkNew := make(map[string]struct{})
+ for k := range d.Attributes {
+ checkOld[k] = struct{}{}
+ }
+ for k := range d2.CopyAttributes() {
+ checkNew[k] = struct{}{}
+ }
+
+ // Make an ordered list so we are sure the approximated hashes are left
+ // to process at the end of the loop
+ keys := make([]string, 0, len(d.Attributes))
+ for k := range d.Attributes {
+ keys = append(keys, k)
+ }
+ sort.StringSlice(keys).Sort()
+
+ for _, k := range keys {
+ diffOld := d.Attributes[k]
+
+ if _, ok := checkOld[k]; !ok {
+ // We're not checking this key for whatever reason (see where
+ // check is modified).
+ continue
+ }
+
+ // Remove this key since we'll never hit it again
+ delete(checkOld, k)
+ delete(checkNew, k)
+
+ _, ok := d2.GetAttribute(k)
+ if !ok {
+ // If there's no new attribute, and the old diff expected the attribute
+ // to be removed, that's just fine.
+ if diffOld.NewRemoved {
+ continue
+ }
+
+ // If the last diff was a computed value then the absence of
+ // that value is allowed since it may mean the value ended up
+ // being the same.
+ if diffOld.NewComputed {
+ ok = true
+ }
+
+ // No exact match, but maybe this is a set containing computed
+ // values. So check if there is an approximate hash in the key
+ // and if so, try to match the key.
+ if strings.Contains(k, "~") {
+ parts := strings.Split(k, ".")
+ parts2 := append([]string(nil), parts...)
+
+ re := regexp.MustCompile(`^~\d+$`)
+ for i, part := range parts {
+ if re.MatchString(part) {
+ // we're going to consider this the base of a
+ // computed hash, and remove all longer matching fields
+ ok = true
+
+ parts2[i] = `\d+`
+ parts2 = parts2[:i+1]
+ break
+ }
+ }
+
+ re, err := regexp.Compile("^" + strings.Join(parts2, `\.`))
+ if err != nil {
+ return false, fmt.Sprintf("regexp failed to compile; err: %#v", err)
+ }
+
+ for k2 := range checkNew {
+ if re.MatchString(k2) {
+ delete(checkNew, k2)
+ }
+ }
+ }
+
+ // This is a little tricky, but when a diff contains a computed
+ // list, set, or map that can only be interpolated after the apply
+ // command has created the dependent resources, it could turn out
+ // that the result is actually the same as the existing state which
+ // would remove the key from the diff.
+ if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
+ ok = true
+ }
+
+ // Similarly, in a RequiresNew scenario, a list that shows up in the plan
+ // diff can disappear from the apply diff, which is calculated from an
+ // empty state.
+ if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
+ ok = true
+ }
+
+ if !ok {
+ return false, fmt.Sprintf("attribute mismatch: %s", k)
+ }
+ }
+
+ // search for the suffix of the base of a [computed] map, list or set.
+ match := multiVal.FindStringSubmatch(k)
+
+ if diffOld.NewComputed && len(match) == 2 {
+ matchLen := len(match[1])
+
+ // This is a computed list, set, or map, so remove any keys with
+ // this prefix from the check list.
+ kprefix := k[:len(k)-matchLen]
+ for k2 := range checkOld {
+ if strings.HasPrefix(k2, kprefix) {
+ delete(checkOld, k2)
+ }
+ }
+ for k2 := range checkNew {
+ if strings.HasPrefix(k2, kprefix) {
+ delete(checkNew, k2)
+ }
+ }
+ }
+
+ // TODO: check for the same value if not computed
+ }
+
+ // Check for leftover attributes
+ if len(checkNew) > 0 {
+ extras := make([]string, 0, len(checkNew))
+ for attr := range checkNew {
+ extras = append(extras, attr)
+ }
+ return false,
+ fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", "))
+ }
+
+ return true, ""
+}
+
+// moduleDiffSort implements sort.Interface to sort module diffs by path.
+type moduleDiffSort []*ModuleDiff
+
+func (s moduleDiffSort) Len() int { return len(s) }
+func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s moduleDiffSort) Less(i, j int) bool {
+ a := s[i]
+ b := s[j]
+
+ // If the lengths are different, then the shorter one always wins
+ if len(a.Path) != len(b.Path) {
+ return len(a.Path) < len(b.Path)
+ }
+
+ // Otherwise, compare lexically
+ return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
+}
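+
+// A one-line sketch (assumed, for illustration only) of applying the sort,
+// as Diff.Equal does above: modules order by path depth, then lexically.
+func exampleSortModules(d *Diff) {
+ sort.Sort(moduleDiffSort(d.Modules))
+}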
diff --git a/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
new file mode 100644
index 00000000..bc9d638a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
@@ -0,0 +1,17 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// DestroyEdge is an edge that represents a standard "destroy" relationship:
+// Target depends on Source because Source is destroying.
+type DestroyEdge struct {
+ S, T dag.Vertex
+}
+
+func (e *DestroyEdge) Hashcode() interface{} { return fmt.Sprintf("%p-%p", e.S, e.T) }
+func (e *DestroyEdge) Source() dag.Vertex { return e.S }
+func (e *DestroyEdge) Target() dag.Vertex { return e.T }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go
new file mode 100644
index 00000000..3cb088a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval.go
@@ -0,0 +1,63 @@
+package terraform
+
+import (
+ "log"
+ "strings"
+)
+
+// EvalNode is the interface that must be implemented by graph nodes to
+// evaluate/execute.
+type EvalNode interface {
+ // Eval evaluates this node with the given context.
+ Eval(EvalContext) (interface{}, error)
+}
+
+// GraphNodeEvalable is the interface that graph nodes must implement
+// to enable evaluation.
+type GraphNodeEvalable interface {
+ EvalTree() EvalNode
+}
+
+// EvalEarlyExitError is a special error return value that can be returned
+// by eval nodes to signal an early exit.
+type EvalEarlyExitError struct{}
+
+func (EvalEarlyExitError) Error() string { return "early exit" }
+
+// Eval evaluates the given EvalNode with the given context, properly
+// evaluating all args in the correct order.
+func Eval(n EvalNode, ctx EvalContext) (interface{}, error) {
+ // Call the lower level eval which doesn't understand early exit,
+ // and if we early exit, it isn't an error.
+ result, err := EvalRaw(n, ctx)
+ if err != nil {
+ if _, ok := err.(EvalEarlyExitError); ok {
+ return nil, nil
+ }
+ }
+
+ return result, err
+}
+
+// EvalRaw is like Eval except that it returns all errors, even if they
+// signal something normal such as EvalEarlyExitError.
+func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
+ path := "unknown"
+ if ctx != nil {
+ path = strings.Join(ctx.Path(), ".")
+ }
+
+ log.Printf("[DEBUG] %s: eval: %T", path, n)
+ output, err := n.Eval(ctx)
+ if err != nil {
+ if _, ok := err.(EvalEarlyExitError); ok {
+ log.Printf("[DEBUG] %s: eval: %T, err: %s", path, n, err)
+ } else {
+ log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err)
+ }
+ }
+
+ return output, err
+}
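+
+// A minimal sketch (assumed, for illustration only) of implementing EvalNode
+// and running it through Eval; exampleEvalNode is hypothetical.
+type exampleEvalNode struct{}
+
+func (exampleEvalNode) Eval(ctx EvalContext) (interface{}, error) {
+ return "done", nil
+}
+
+func runExampleEval(ctx EvalContext) (interface{}, error) {
+ // Eval logs the node type and handles EvalEarlyExitError for us
+ return Eval(exampleEvalNode{}, ctx)
+}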
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
new file mode 100644
index 00000000..2f6a4973
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
@@ -0,0 +1,359 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "strconv"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalApply is an EvalNode implementation that applies the given diff
+// to the instance state via the resource provider.
+type EvalApply struct {
+ Info *InstanceInfo
+ State **InstanceState
+ Diff **InstanceDiff
+ Provider *ResourceProvider
+ Output **InstanceState
+ CreateNew *bool
+ Error *error
+}
+
+// TODO: test
+func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
+ diff := *n.Diff
+ provider := *n.Provider
+ state := *n.State
+
+ // If we have no diff, we have nothing to do!
+ if diff.Empty() {
+ log.Printf(
+ "[DEBUG] apply: %s: diff is empty, doing nothing.", n.Info.Id)
+ return nil, nil
+ }
+
+ // Remove any output values from the diff
+ for k, ad := range diff.CopyAttributes() {
+ if ad.Type == DiffAttrOutput {
+ diff.DelAttribute(k)
+ }
+ }
+
+ // If the state is nil, make it non-nil
+ if state == nil {
+ state = new(InstanceState)
+ }
+ state.init()
+
+ // Flag if we're creating a new instance
+ if n.CreateNew != nil {
+ *n.CreateNew = state.ID == "" && !diff.GetDestroy() || diff.RequiresNew()
+ }
+
+ // With the completed diff, apply!
+ log.Printf("[DEBUG] apply: %s: executing Apply", n.Info.Id)
+ state, err := provider.Apply(n.Info, state, diff)
+ if state == nil {
+ state = new(InstanceState)
+ }
+ state.init()
+
+ // Force the "id" attribute to be our ID
+ if state.ID != "" {
+ state.Attributes["id"] = state.ID
+ }
+
+ // If the value is the unknown variable value, then it is an error.
+ // In this case we record the error and remove it from the state
+ for ak, av := range state.Attributes {
+ if av == config.UnknownVariableValue {
+ err = multierror.Append(err, fmt.Errorf(
+ "Attribute with unknown value: %s", ak))
+ delete(state.Attributes, ak)
+ }
+ }
+
+ // Write the final state
+ if n.Output != nil {
+ *n.Output = state
+ }
+
+ // If there is an error, append it to our output error if we have
+ // one; otherwise just return it.
+ if err != nil {
+ if n.Error != nil {
+ helpfulErr := fmt.Errorf("%s: %s", n.Info.Id, err.Error())
+ *n.Error = multierror.Append(*n.Error, helpfulErr)
+ } else {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+// EvalApplyPre is an EvalNode implementation that does the pre-Apply work
+type EvalApplyPre struct {
+ Info *InstanceInfo
+ State **InstanceState
+ Diff **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
+ state := *n.State
+ diff := *n.Diff
+
+ // If the state is nil, make it non-nil
+ if state == nil {
+ state = new(InstanceState)
+ }
+ state.init()
+
+ {
+ // Call the pre-apply hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreApply(n.Info, state, diff)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+// EvalApplyPost is an EvalNode implementation that does the post-Apply work
+type EvalApplyPost struct {
+ Info *InstanceInfo
+ State **InstanceState
+ Error *error
+}
+
+// TODO: test
+func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) {
+ state := *n.State
+
+ {
+ // Call post-apply hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostApply(n.Info, state, *n.Error)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, *n.Error
+}
+
+// EvalApplyProvisioners is an EvalNode implementation that executes
+// the provisioners for a resource.
+//
+// TODO(mitchellh): This should probably be split up into a more fine-grained
+// ApplyProvisioner (single) that is looped over.
+type EvalApplyProvisioners struct {
+ Info *InstanceInfo
+ State **InstanceState
+ Resource *config.Resource
+ InterpResource *Resource
+ CreateNew *bool
+ Error *error
+
+ // When is the type of provisioner to run at this point
+ When config.ProvisionerWhen
+}
+
+// TODO: test
+func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
+ state := *n.State
+
+ if n.CreateNew != nil && !*n.CreateNew {
+ // If we're not creating a new resource, then don't run provisioners
+ return nil, nil
+ }
+
+ provs := n.filterProvisioners()
+ if len(provs) == 0 {
+ // We have no provisioners, so don't do anything
+ return nil, nil
+ }
+
+ // taint tells us whether to enable tainting.
+ taint := n.When == config.ProvisionerWhenCreate
+
+ if n.Error != nil && *n.Error != nil {
+ if taint {
+ state.Tainted = true
+ }
+
+ // A previous step already errored, so don't run the provisioners
+ return nil, nil
+ }
+
+ {
+ // Call pre hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreProvisionResource(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Run the provisioners. If there is an error, append it to our output
+ // error if we have one; otherwise just return it.
+ err := n.apply(ctx, provs)
+ if err != nil {
+ if taint {
+ state.Tainted = true
+ }
+
+ if n.Error != nil {
+ *n.Error = multierror.Append(*n.Error, err)
+ } else {
+ return nil, err
+ }
+ }
+
+ {
+ // Call post hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostProvisionResource(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+// filterProvisioners filters the provisioners on the resource to only
+// the provisioners specified by the "when" option.
+func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner {
+ // Fast path the zero case
+ if n.Resource == nil {
+ return nil
+ }
+
+ if len(n.Resource.Provisioners) == 0 {
+ return nil
+ }
+
+ result := make([]*config.Provisioner, 0, len(n.Resource.Provisioners))
+ for _, p := range n.Resource.Provisioners {
+ if p.When == n.When {
+ result = append(result, p)
+ }
+ }
+
+ return result
+}
+
+func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provisioner) error {
+ state := *n.State
+
+ // Store the original connection info, restore later
+ origConnInfo := state.Ephemeral.ConnInfo
+ defer func() {
+ state.Ephemeral.ConnInfo = origConnInfo
+ }()
+
+ for _, prov := range provs {
+ // Get the provisioner
+ provisioner := ctx.Provisioner(prov.Type)
+
+ // Interpolate the provisioner config
+ provConfig, err := ctx.Interpolate(prov.RawConfig.Copy(), n.InterpResource)
+ if err != nil {
+ return err
+ }
+
+ // Interpolate the conn info, since it may contain variables
+ connInfo, err := ctx.Interpolate(prov.ConnInfo.Copy(), n.InterpResource)
+ if err != nil {
+ return err
+ }
+
+ // Merge the connection information
+ overlay := make(map[string]string)
+ if origConnInfo != nil {
+ for k, v := range origConnInfo {
+ overlay[k] = v
+ }
+ }
+ for k, v := range connInfo.Config {
+ switch vt := v.(type) {
+ case string:
+ overlay[k] = vt
+ case int64:
+ overlay[k] = strconv.FormatInt(vt, 10)
+ case int32:
+ overlay[k] = strconv.FormatInt(int64(vt), 10)
+ case int:
+ overlay[k] = strconv.FormatInt(int64(vt), 10)
+ case float32:
+ overlay[k] = strconv.FormatFloat(float64(vt), 'f', 3, 32)
+ case float64:
+ overlay[k] = strconv.FormatFloat(vt, 'f', 3, 64)
+ case bool:
+ overlay[k] = strconv.FormatBool(vt)
+ default:
+ overlay[k] = fmt.Sprintf("%v", vt)
+ }
+ }
+ state.Ephemeral.ConnInfo = overlay
+
+ {
+ // Call pre hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreProvision(n.Info, prov.Type)
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ // The output function
+ outputFn := func(msg string) {
+ ctx.Hook(func(h Hook) (HookAction, error) {
+ h.ProvisionOutput(n.Info, prov.Type, msg)
+ return HookActionContinue, nil
+ })
+ }
+
+ // Invoke the Provisioner
+ output := CallbackUIOutput{OutputFn: outputFn}
+ applyErr := provisioner.Apply(&output, state, provConfig)
+
+ // Call post hook
+ hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostProvision(n.Info, prov.Type, applyErr)
+ })
+
+ // Handle the error before we deal with the hook
+ if applyErr != nil {
+ // Determine failure behavior
+ switch prov.OnFailure {
+ case config.ProvisionerOnFailureContinue:
+ log.Printf(
+ "[INFO] apply: %s [%s]: error during provision, continue requested",
+ n.Info.Id, prov.Type)
+
+ case config.ProvisionerOnFailureFail:
+ return applyErr
+ }
+ }
+
+ // Deal with the hook
+ if hookErr != nil {
+ return hookErr
+ }
+ }
+
+ return nil
+}
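
The merge loop above coerces every interpolated connection value to a string before handing the map to the provisioner. A minimal standalone sketch of that coercion follows; the `normalizeConnInfo` helper is illustrative, not part of Terraform's API:

package main

import (
	"fmt"
	"strconv"
)

// normalizeConnInfo flattens loosely-typed config values into the
// map[string]string form that provisioner connections expect,
// mirroring the type switch in EvalApplyProvisioners.apply.
func normalizeConnInfo(in map[string]interface{}) map[string]string {
	out := make(map[string]string, len(in))
	for k, v := range in {
		switch vt := v.(type) {
		case string:
			out[k] = vt
		case int:
			out[k] = strconv.FormatInt(int64(vt), 10)
		case float64:
			out[k] = strconv.FormatFloat(vt, 'f', 3, 64)
		case bool:
			out[k] = strconv.FormatBool(vt)
		default:
			out[k] = fmt.Sprintf("%v", vt)
		}
	}
	return out
}

func main() {
	fmt.Println(normalizeConnInfo(map[string]interface{}{
		"host": "10.0.0.5", "port": 22, "agent": true,
	}))
}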
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
new file mode 100644
index 00000000..715e79e1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
@@ -0,0 +1,38 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalCheckPreventDestroy is an EvalNode implementation that returns an
+// error if a resource has PreventDestroy configured and the diff
+// would destroy the resource.
+type EvalCheckPreventDestroy struct {
+ Resource *config.Resource
+ ResourceId string
+ Diff **InstanceDiff
+}
+
+func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) {
+ if n.Diff == nil || *n.Diff == nil || n.Resource == nil {
+ return nil, nil
+ }
+
+ diff := *n.Diff
+ preventDestroy := n.Resource.Lifecycle.PreventDestroy
+
+ if diff.GetDestroy() && preventDestroy {
+ resourceId := n.ResourceId
+ if resourceId == "" {
+ resourceId = n.Resource.Id()
+ }
+
+ return nil, fmt.Errorf(preventDestroyErrStr, resourceId)
+ }
+
+ return nil, nil
+}
+
+const preventDestroyErrStr = `%s: the plan would destroy this resource, but it currently has lifecycle.prevent_destroy set to true. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or adjust the scope of the plan using the -target flag.`
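
The guard above only fires when a destroy is actually planned for a protected resource. A standalone sketch of the same logic, with stand-in types (`lifecycle` and `instanceDiff` are invented for the example):

package main

import "fmt"

type lifecycle struct{ PreventDestroy bool }
type instanceDiff struct{ Destroy bool }

// checkPreventDestroy mirrors EvalCheckPreventDestroy: it errors only
// when the diff would destroy a resource that is protected.
func checkPreventDestroy(id string, lc lifecycle, d *instanceDiff) error {
	if d == nil || !d.Destroy || !lc.PreventDestroy {
		return nil
	}
	return fmt.Errorf("%s: plan would destroy a resource with lifecycle.prevent_destroy set", id)
}

func main() {
	err := checkPreventDestroy("aws_instance.db", lifecycle{PreventDestroy: true}, &instanceDiff{Destroy: true})
	fmt.Println(err)
}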
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
new file mode 100644
index 00000000..a1f815b7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
@@ -0,0 +1,84 @@
+package terraform
+
+import (
+ "sync"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalContext is the interface that is given to eval nodes to execute.
+type EvalContext interface {
+ // Stopped returns a channel that is closed when evaluation is stopped
+ // via Terraform.Context.Stop()
+ Stopped() <-chan struct{}
+
+ // Path is the current module path.
+ Path() []string
+
+ // Hook is used to call hook methods. The callback is called for each
+ // hook and should return the hook action to take and the error.
+ Hook(func(Hook) (HookAction, error)) error
+
+ // Input is the UIInput object for interacting with the UI.
+ Input() UIInput
+
+ // InitProvider initializes the provider with the given name and
+ // returns the implementation of the resource provider or an error.
+ //
+ // It is an error to initialize the same provider more than once.
+ InitProvider(string) (ResourceProvider, error)
+
+ // Provider gets the provider instance with the given name (already
+ // initialized) or returns nil if the provider isn't initialized.
+ Provider(string) ResourceProvider
+
+ // CloseProvider closes provider connections that aren't needed anymore.
+ CloseProvider(string) error
+
+ // ConfigureProvider configures the provider with the given
+ // configuration. This is a separate context call because this call
+ // is used to store the provider configuration for inheritance lookups
+ // with ParentProviderConfig().
+ ConfigureProvider(string, *ResourceConfig) error
+ SetProviderConfig(string, *ResourceConfig) error
+ ParentProviderConfig(string) *ResourceConfig
+
+ // ProviderInput and SetProviderInput are used to configure providers
+ // from user input.
+ ProviderInput(string) map[string]interface{}
+ SetProviderInput(string, map[string]interface{})
+
+ // InitProvisioner initializes the provisioner with the given name and
+ // returns the implementation of the resource provisioner or an error.
+ //
+ // It is an error to initialize the same provisioner more than once.
+ InitProvisioner(string) (ResourceProvisioner, error)
+
+ // Provisioner gets the provisioner instance with the given name (already
+ // initialized) or returns nil if the provisioner isn't initialized.
+ Provisioner(string) ResourceProvisioner
+
+ // CloseProvisioner closes provisioner connections that aren't needed
+ // anymore.
+ CloseProvisioner(string) error
+
+ // Interpolate takes the given raw configuration and completes
+ // the interpolations, returning the processed ResourceConfig.
+ //
+ // The resource argument is optional. If given, it is the resource
+ // that is currently being acted upon.
+ Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error)
+
+ // SetVariables sets the variables for the module within
+ // this context with the name n. This function call is additive:
+ // the second parameter is merged with any previous call.
+ SetVariables(string, map[string]interface{})
+
+ // Diff returns the global diff as well as the lock that should
+ // be used to modify that diff.
+ Diff() (*Diff, *sync.RWMutex)
+
+ // State returns the global state as well as the lock that should
+ // be used to modify that state.
+ State() (*State, *sync.RWMutex)
+}
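
The Hook method's contract is worth spelling out: run the callback against every registered hook, abort on the first error, and stop early when a hook asks for a halt. A standalone sketch of that dispatch loop, with invented action constants standing in for the real HookAction values:

package main

import (
	"errors"
	"fmt"
)

type hookAction int

const (
	actionContinue hookAction = iota
	actionHalt
)

// runHooks mirrors the EvalContext.Hook contract: invoke fn for each
// hook, abort on error, and exit early when a hook requests a halt.
func runHooks(hooks []string, fn func(h string) (hookAction, error)) error {
	for _, h := range hooks {
		action, err := fn(h)
		if err != nil {
			return err
		}
		if action == actionHalt {
			return errors.New("early exit requested by hook: " + h)
		}
	}
	return nil
}

func main() {
	err := runHooks([]string{"ui", "stop-on-apply"}, func(h string) (hookAction, error) {
		fmt.Println("calling hook", h)
		if h == "stop-on-apply" {
			return actionHalt, nil
		}
		return actionContinue, nil
	})
	fmt.Println(err)
}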
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
new file mode 100644
index 00000000..3dcfb227
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
@@ -0,0 +1,347 @@
+package terraform
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// BuiltinEvalContext is an EvalContext implementation that is used by
+// Terraform by default.
+type BuiltinEvalContext struct {
+ // StopContext is the context used to track whether evaluation has been stopped
+ StopContext context.Context
+
+ // PathValue is the Path that this context is operating within.
+ PathValue []string
+
+ // Interpolater settings below affect the interpolation of variables.
+ //
+ // The InterpolaterVars are the exact value for ${var.foo} values.
+ // The map is shared between all contexts and is a mapping of
+ // PATH to KEY to VALUE. Because it is shared by all contexts as well
+ // as the Interpolater itself, it is protected by InterpolaterVarLock
+ // which must be locked during any access to the map.
+ Interpolater *Interpolater
+ InterpolaterVars map[string]map[string]interface{}
+ InterpolaterVarLock *sync.Mutex
+
+ Components contextComponentFactory
+ Hooks []Hook
+ InputValue UIInput
+ ProviderCache map[string]ResourceProvider
+ ProviderConfigCache map[string]*ResourceConfig
+ ProviderInputConfig map[string]map[string]interface{}
+ ProviderLock *sync.Mutex
+ ProvisionerCache map[string]ResourceProvisioner
+ ProvisionerLock *sync.Mutex
+ DiffValue *Diff
+ DiffLock *sync.RWMutex
+ StateValue *State
+ StateLock *sync.RWMutex
+
+ once sync.Once
+}
+
+func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} {
+ // StopContext can be nil during tests; a nil channel blocks forever on receive.
+ if ctx.StopContext == nil {
+ return nil
+ }
+
+ return ctx.StopContext.Done()
+}
+
+func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error {
+ for _, h := range ctx.Hooks {
+ action, err := fn(h)
+ if err != nil {
+ return err
+ }
+
+ switch action {
+ case HookActionContinue:
+ continue
+ case HookActionHalt:
+ // Return an early exit error to trigger an early exit
+ log.Printf("[WARN] Early exit triggered by hook: %T", h)
+ return EvalEarlyExitError{}
+ }
+ }
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) Input() UIInput {
+ return ctx.InputValue
+}
+
+func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error) {
+ ctx.once.Do(ctx.init)
+
+ // If we already initialized, it is an error
+ if p := ctx.Provider(n); p != nil {
+ return nil, fmt.Errorf("Provider '%s' already initialized", n)
+ }
+
+ // Warning: make sure to acquire these locks AFTER the call to Provider
+ // above, since it also acquires locks.
+ ctx.ProviderLock.Lock()
+ defer ctx.ProviderLock.Unlock()
+
+ providerPath := make([]string, len(ctx.Path())+1)
+ copy(providerPath, ctx.Path())
+ providerPath[len(providerPath)-1] = n
+ key := PathCacheKey(providerPath)
+
+ typeName := strings.SplitN(n, ".", 2)[0]
+ p, err := ctx.Components.ResourceProvider(typeName, key)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx.ProviderCache[key] = p
+ return p, nil
+}
+
+func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider {
+ ctx.once.Do(ctx.init)
+
+ ctx.ProviderLock.Lock()
+ defer ctx.ProviderLock.Unlock()
+
+ providerPath := make([]string, len(ctx.Path())+1)
+ copy(providerPath, ctx.Path())
+ providerPath[len(providerPath)-1] = n
+
+ return ctx.ProviderCache[PathCacheKey(providerPath)]
+}
+
+func (ctx *BuiltinEvalContext) CloseProvider(n string) error {
+ ctx.once.Do(ctx.init)
+
+ ctx.ProviderLock.Lock()
+ defer ctx.ProviderLock.Unlock()
+
+ providerPath := make([]string, len(ctx.Path())+1)
+ copy(providerPath, ctx.Path())
+ providerPath[len(providerPath)-1] = n
+
+ key := PathCacheKey(providerPath)
+ if p, ok := ctx.ProviderCache[key].(ResourceProviderCloser); ok {
+ delete(ctx.ProviderCache, key)
+ return p.Close()
+ }
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) ConfigureProvider(
+ n string, cfg *ResourceConfig) error {
+ p := ctx.Provider(n)
+ if p == nil {
+ return fmt.Errorf("Provider '%s' not initialized", n)
+ }
+
+ if err := ctx.SetProviderConfig(n, cfg); err != nil {
+ return err
+ }
+
+ return p.Configure(cfg)
+}
+
+func (ctx *BuiltinEvalContext) SetProviderConfig(
+ n string, cfg *ResourceConfig) error {
+ providerPath := make([]string, len(ctx.Path())+1)
+ copy(providerPath, ctx.Path())
+ providerPath[len(providerPath)-1] = n
+
+ // Save the configuration
+ ctx.ProviderLock.Lock()
+ ctx.ProviderConfigCache[PathCacheKey(providerPath)] = cfg
+ ctx.ProviderLock.Unlock()
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} {
+ ctx.ProviderLock.Lock()
+ defer ctx.ProviderLock.Unlock()
+
+ // Make a copy of the path so we can safely edit it
+ path := ctx.Path()
+ pathCopy := make([]string, len(path)+1)
+ copy(pathCopy, path)
+
+ // Go up the tree.
+ for i := len(path) - 1; i >= 0; i-- {
+ pathCopy[i+1] = n
+ k := PathCacheKey(pathCopy[:i+2])
+ if v, ok := ctx.ProviderInputConfig[k]; ok {
+ return v
+ }
+ }
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface{}) {
+ providerPath := make([]string, len(ctx.Path())+1)
+ copy(providerPath, ctx.Path())
+ providerPath[len(providerPath)-1] = n
+
+ // Save the configuration
+ ctx.ProviderLock.Lock()
+ ctx.ProviderInputConfig[PathCacheKey(providerPath)] = c
+ ctx.ProviderLock.Unlock()
+}
+
+func (ctx *BuiltinEvalContext) ParentProviderConfig(n string) *ResourceConfig {
+ ctx.ProviderLock.Lock()
+ defer ctx.ProviderLock.Unlock()
+
+ // Make a copy of the path so we can safely edit it
+ path := ctx.Path()
+ pathCopy := make([]string, len(path)+1)
+ copy(pathCopy, path)
+
+ // Go up the tree.
+ for i := len(path) - 1; i >= 0; i-- {
+ pathCopy[i+1] = n
+ k := PathCacheKey(pathCopy[:i+2])
+ if v, ok := ctx.ProviderConfigCache[k]; ok {
+ return v
+ }
+ }
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) InitProvisioner(
+ n string) (ResourceProvisioner, error) {
+ ctx.once.Do(ctx.init)
+
+ // If we already initialized, it is an error
+ if p := ctx.Provisioner(n); p != nil {
+ return nil, fmt.Errorf("Provisioner '%s' already initialized", n)
+ }
+
+ // Warning: make sure to acquire these locks AFTER the call to Provisioner
+ // above, since it also acquires locks.
+ ctx.ProvisionerLock.Lock()
+ defer ctx.ProvisionerLock.Unlock()
+
+ provPath := make([]string, len(ctx.Path())+1)
+ copy(provPath, ctx.Path())
+ provPath[len(provPath)-1] = n
+ key := PathCacheKey(provPath)
+
+ p, err := ctx.Components.ResourceProvisioner(n, key)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx.ProvisionerCache[key] = p
+ return p, nil
+}
+
+func (ctx *BuiltinEvalContext) Provisioner(n string) ResourceProvisioner {
+ ctx.once.Do(ctx.init)
+
+ ctx.ProvisionerLock.Lock()
+ defer ctx.ProvisionerLock.Unlock()
+
+ provPath := make([]string, len(ctx.Path())+1)
+ copy(provPath, ctx.Path())
+ provPath[len(provPath)-1] = n
+
+ return ctx.ProvisionerCache[PathCacheKey(provPath)]
+}
+
+func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error {
+ ctx.once.Do(ctx.init)
+
+ ctx.ProvisionerLock.Lock()
+ defer ctx.ProvisionerLock.Unlock()
+
+ provPath := make([]string, len(ctx.Path())+1)
+ copy(provPath, ctx.Path())
+ provPath[len(provPath)-1] = n
+
+ key := PathCacheKey(provPath)
+ if p, ok := ctx.ProvisionerCache[key].(ResourceProvisionerCloser); ok {
+ delete(ctx.ProvisionerCache, key)
+ return p.Close()
+ }
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) Interpolate(
+ cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) {
+ if cfg != nil {
+ scope := &InterpolationScope{
+ Path: ctx.Path(),
+ Resource: r,
+ }
+
+ vs, err := ctx.Interpolater.Values(scope, cfg.Variables)
+ if err != nil {
+ return nil, err
+ }
+
+ // Do the interpolation
+ if err := cfg.Interpolate(vs); err != nil {
+ return nil, err
+ }
+ }
+
+ result := NewResourceConfig(cfg)
+ result.interpolateForce()
+ return result, nil
+}
+
+func (ctx *BuiltinEvalContext) Path() []string {
+ return ctx.PathValue
+}
+
+func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]interface{}) {
+ ctx.InterpolaterVarLock.Lock()
+ defer ctx.InterpolaterVarLock.Unlock()
+
+ path := make([]string, len(ctx.Path())+1)
+ copy(path, ctx.Path())
+ path[len(path)-1] = n
+ key := PathCacheKey(path)
+
+ vars := ctx.InterpolaterVars[key]
+ if vars == nil {
+ vars = make(map[string]interface{})
+ ctx.InterpolaterVars[key] = vars
+ }
+
+ for k, v := range vs {
+ vars[k] = v
+ }
+}
+
+func (ctx *BuiltinEvalContext) Diff() (*Diff, *sync.RWMutex) {
+ return ctx.DiffValue, ctx.DiffLock
+}
+
+func (ctx *BuiltinEvalContext) State() (*State, *sync.RWMutex) {
+ return ctx.StateValue, ctx.StateLock
+}
+
+func (ctx *BuiltinEvalContext) init() {
+}
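
Throughout this file, provider and provisioner caches are keyed by the module path plus the instance name. A hedged standalone sketch of that key construction; joining with "|" here is an assumption, since the real PathCacheKey implementation may differ:

package main

import (
	"fmt"
	"strings"
)

// cacheKey copies the module path, appends the instance name, and
// flattens it into a single map key, in the spirit of PathCacheKey.
func cacheKey(path []string, name string) string {
	full := make([]string, len(path)+1)
	copy(full, path)
	full[len(full)-1] = name
	return strings.Join(full, "|")
}

func main() {
	cache := map[string]string{}
	cache[cacheKey([]string{"root", "network"}, "aws")] = "<provider instance>"
	fmt.Println(cache)
}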
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
new file mode 100644
index 00000000..4f90d5b1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
@@ -0,0 +1,208 @@
+package terraform
+
+import (
+ "sync"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// MockEvalContext is a mock version of EvalContext that can be used
+// for tests.
+type MockEvalContext struct {
+ StoppedCalled bool
+ StoppedValue <-chan struct{}
+
+ HookCalled bool
+ HookHook Hook
+ HookError error
+
+ InputCalled bool
+ InputInput UIInput
+
+ InitProviderCalled bool
+ InitProviderName string
+ InitProviderProvider ResourceProvider
+ InitProviderError error
+
+ ProviderCalled bool
+ ProviderName string
+ ProviderProvider ResourceProvider
+
+ CloseProviderCalled bool
+ CloseProviderName string
+ CloseProviderProvider ResourceProvider
+
+ ProviderInputCalled bool
+ ProviderInputName string
+ ProviderInputConfig map[string]interface{}
+
+ SetProviderInputCalled bool
+ SetProviderInputName string
+ SetProviderInputConfig map[string]interface{}
+
+ ConfigureProviderCalled bool
+ ConfigureProviderName string
+ ConfigureProviderConfig *ResourceConfig
+ ConfigureProviderError error
+
+ SetProviderConfigCalled bool
+ SetProviderConfigName string
+ SetProviderConfigConfig *ResourceConfig
+
+ ParentProviderConfigCalled bool
+ ParentProviderConfigName string
+ ParentProviderConfigConfig *ResourceConfig
+
+ InitProvisionerCalled bool
+ InitProvisionerName string
+ InitProvisionerProvisioner ResourceProvisioner
+ InitProvisionerError error
+
+ ProvisionerCalled bool
+ ProvisionerName string
+ ProvisionerProvisioner ResourceProvisioner
+
+ CloseProvisionerCalled bool
+ CloseProvisionerName string
+ CloseProvisionerProvisioner ResourceProvisioner
+
+ InterpolateCalled bool
+ InterpolateConfig *config.RawConfig
+ InterpolateResource *Resource
+ InterpolateConfigResult *ResourceConfig
+ InterpolateError error
+
+ PathCalled bool
+ PathPath []string
+
+ SetVariablesCalled bool
+ SetVariablesModule string
+ SetVariablesVariables map[string]interface{}
+
+ DiffCalled bool
+ DiffDiff *Diff
+ DiffLock *sync.RWMutex
+
+ StateCalled bool
+ StateState *State
+ StateLock *sync.RWMutex
+}
+
+func (c *MockEvalContext) Stopped() <-chan struct{} {
+ c.StoppedCalled = true
+ return c.StoppedValue
+}
+
+func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error {
+ c.HookCalled = true
+ if c.HookHook != nil {
+ if _, err := fn(c.HookHook); err != nil {
+ return err
+ }
+ }
+
+ return c.HookError
+}
+
+func (c *MockEvalContext) Input() UIInput {
+ c.InputCalled = true
+ return c.InputInput
+}
+
+func (c *MockEvalContext) InitProvider(n string) (ResourceProvider, error) {
+ c.InitProviderCalled = true
+ c.InitProviderName = n
+ return c.InitProviderProvider, c.InitProviderError
+}
+
+func (c *MockEvalContext) Provider(n string) ResourceProvider {
+ c.ProviderCalled = true
+ c.ProviderName = n
+ return c.ProviderProvider
+}
+
+func (c *MockEvalContext) CloseProvider(n string) error {
+ c.CloseProviderCalled = true
+ c.CloseProviderName = n
+ return nil
+}
+
+func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error {
+ c.ConfigureProviderCalled = true
+ c.ConfigureProviderName = n
+ c.ConfigureProviderConfig = cfg
+ return c.ConfigureProviderError
+}
+
+func (c *MockEvalContext) SetProviderConfig(
+ n string, cfg *ResourceConfig) error {
+ c.SetProviderConfigCalled = true
+ c.SetProviderConfigName = n
+ c.SetProviderConfigConfig = cfg
+ return nil
+}
+
+func (c *MockEvalContext) ParentProviderConfig(n string) *ResourceConfig {
+ c.ParentProviderConfigCalled = true
+ c.ParentProviderConfigName = n
+ return c.ParentProviderConfigConfig
+}
+
+func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} {
+ c.ProviderInputCalled = true
+ c.ProviderInputName = n
+ return c.ProviderInputConfig
+}
+
+func (c *MockEvalContext) SetProviderInput(n string, cfg map[string]interface{}) {
+ c.SetProviderInputCalled = true
+ c.SetProviderInputName = n
+ c.SetProviderInputConfig = cfg
+}
+
+func (c *MockEvalContext) InitProvisioner(n string) (ResourceProvisioner, error) {
+ c.InitProvisionerCalled = true
+ c.InitProvisionerName = n
+ return c.InitProvisionerProvisioner, c.InitProvisionerError
+}
+
+func (c *MockEvalContext) Provisioner(n string) ResourceProvisioner {
+ c.ProvisionerCalled = true
+ c.ProvisionerName = n
+ return c.ProvisionerProvisioner
+}
+
+func (c *MockEvalContext) CloseProvisioner(n string) error {
+ c.CloseProvisionerCalled = true
+ c.CloseProvisionerName = n
+ return nil
+}
+
+func (c *MockEvalContext) Interpolate(
+ config *config.RawConfig, resource *Resource) (*ResourceConfig, error) {
+ c.InterpolateCalled = true
+ c.InterpolateConfig = config
+ c.InterpolateResource = resource
+ return c.InterpolateConfigResult, c.InterpolateError
+}
+
+func (c *MockEvalContext) Path() []string {
+ c.PathCalled = true
+ return c.PathPath
+}
+
+func (c *MockEvalContext) SetVariables(n string, vs map[string]interface{}) {
+ c.SetVariablesCalled = true
+ c.SetVariablesModule = n
+ c.SetVariablesVariables = vs
+}
+
+func (c *MockEvalContext) Diff() (*Diff, *sync.RWMutex) {
+ c.DiffCalled = true
+ return c.DiffDiff, c.DiffLock
+}
+
+func (c *MockEvalContext) State() (*State, *sync.RWMutex) {
+ c.StateCalled = true
+ return c.StateState, c.StateLock
+}
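
MockEvalContext follows a plain record-and-return pattern: each method flips a Called flag, captures its arguments, and returns preset fields. A minimal standalone illustration of the same pattern; the `greeter` interface is invented for the example:

package main

import "fmt"

type greeter interface {
	Greet(name string) string
}

// mockGreeter records calls and returns canned values, the same shape
// as MockEvalContext's XxxCalled / XxxName / XxxResult fields.
type mockGreeter struct {
	GreetCalled bool
	GreetName   string
	GreetResult string
}

func (m *mockGreeter) Greet(name string) string {
	m.GreetCalled = true
	m.GreetName = name
	return m.GreetResult
}

func main() {
	m := &mockGreeter{GreetResult: "hello"}
	var g greeter = m
	fmt.Println(g.Greet("terraform"), m.GreetCalled, m.GreetName)
}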
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
new file mode 100644
index 00000000..2ae56a75
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
@@ -0,0 +1,58 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalCountFixZeroOneBoundary is an EvalNode that fixes up the state
+// when there is a resource count with zero/one boundary, i.e. fixing
+// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
+type EvalCountFixZeroOneBoundary struct {
+ Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalCountFixZeroOneBoundary) Eval(ctx EvalContext) (interface{}, error) {
+ // Get the count; it tells us whether we're supposed to be adding
+ // the ".0" index suffix or trimming it.
+ count, err := n.Resource.Count()
+ if err != nil {
+ return nil, err
+ }
+
+ // Figure what to look for and what to replace it with
+ hunt := n.Resource.Id()
+ replace := hunt + ".0"
+ if count < 2 {
+ hunt, replace = replace, hunt
+ }
+
+ state, lock := ctx.State()
+
+ // Get a lock so we can access this instance and potentially make
+ // changes to it.
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, then it doesn't matter.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs, ok := mod.Resources[hunt]
+ if !ok {
+ return nil, nil
+ }
+
+ // If the replacement key exists, we just keep both
+ if _, ok := mod.Resources[replace]; ok {
+ return nil, nil
+ }
+
+ mod.Resources[replace] = rs
+ delete(mod.Resources, hunt)
+
+ return nil, nil
+}
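
The rename above swaps a resource key between its bare form and its ".0" form depending on the count, and leaves both alone if the target key already exists. A standalone sketch over a plain map, with string values standing in for resource state:

package main

import "fmt"

// fixZeroOneBoundary renames a resource key between its bare and ".0"
// forms, keeping both untouched if the target key already exists.
func fixZeroOneBoundary(resources map[string]string, id string, count int) {
	hunt := id
	replace := id + ".0"
	if count < 2 {
		hunt, replace = replace, hunt
	}
	rs, ok := resources[hunt]
	if !ok {
		return
	}
	if _, ok := resources[replace]; ok {
		return
	}
	resources[replace] = rs
	delete(resources, hunt)
}

func main() {
	res := map[string]string{"aws_instance.foo.0": "i-abc123"}
	fixZeroOneBoundary(res, "aws_instance.foo", 1) // count dropped to 1
	fmt.Println(res)                               // map[aws_instance.foo:i-abc123]
}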
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
new file mode 100644
index 00000000..91e2b904
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
@@ -0,0 +1,78 @@
+package terraform
+
+import (
+ "log"
+)
+
+// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state
+// when there is a resource count with zero/one boundary, i.e. fixing
+// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
+//
+// This works on the global state.
+type EvalCountFixZeroOneBoundaryGlobal struct{}
+
+// TODO: test
+func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) {
+ // Get the state and lock it since we'll potentially modify it
+ state, lock := ctx.State()
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Prune the state since we require a clean state to work
+ state.prune()
+
+ // Go through each module since the boundaries are restricted to a
+ // module scope.
+ for _, m := range state.Modules {
+ if err := n.fixModule(m); err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(m *ModuleState) error {
+ // Counts keeps track of keys and their counts
+ counts := make(map[string]int)
+ for k := range m.Resources {
+ // Parse the key
+ key, err := ParseResourceStateKey(k)
+ if err != nil {
+ return err
+ }
+
+ // Set the index to -1 so that we can keep count
+ key.Index = -1
+
+ // Increment
+ counts[key.String()]++
+ }
+
+ // Go through the counts and do the fixup for each resource
+ for raw, count := range counts {
+ // Search and replace this resource
+ search := raw
+ replace := raw + ".0"
+ if count < 2 {
+ search, replace = replace, search
+ }
+ log.Printf("[TRACE] EvalCountFixZeroOneBoundaryGlobal: count %d, search %q, replace %q", count, search, replace)
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs, ok := m.Resources[search]
+ if !ok {
+ continue
+ }
+
+ // If the replacement key exists, we just keep both
+ if _, ok := m.Resources[replace]; ok {
+ continue
+ }
+
+ m.Resources[replace] = rs
+ delete(m.Resources, search)
+ }
+
+ return nil
+}
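
fixModule counts instances per resource by parsing each state key and discarding its index. A hedged standalone version of that counting step; the simple dot-split stands in for ParseResourceStateKey, which handles more cases:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// countInstances groups state keys like "aws_instance.foo.1" by their
// index-free base name, approximating the counts map in fixModule.
func countInstances(keys []string) map[string]int {
	counts := make(map[string]int)
	for _, k := range keys {
		parts := strings.Split(k, ".")
		// Drop a trailing numeric index if present.
		if len(parts) > 2 {
			if _, err := strconv.Atoi(parts[len(parts)-1]); err == nil {
				parts = parts[:len(parts)-1]
			}
		}
		counts[strings.Join(parts, ".")]++
	}
	return counts
}

func main() {
	fmt.Println(countInstances([]string{
		"aws_instance.foo.0", "aws_instance.foo.1", "aws_instance.bar",
	})) // map[aws_instance.bar:1 aws_instance.foo:2]
}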
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
new file mode 100644
index 00000000..54a8333e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
@@ -0,0 +1,25 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalCountCheckComputed is an EvalNode that checks whether a resource
+// count is computed and errors if so. This can happen when the count
+// crosses a module boundary, which we don't yet support.
+type EvalCountCheckComputed struct {
+ Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalCountCheckComputed) Eval(ctx EvalContext) (interface{}, error) {
+ if n.Resource.RawCount.Value() == unknownValue() {
+ return nil, fmt.Errorf(
+ "%s: value of 'count' cannot be computed",
+ n.Resource.Id())
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
new file mode 100644
index 00000000..6f09526a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
@@ -0,0 +1,478 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalCompareDiff is an EvalNode implementation that compares two diffs
+// and errors if the diffs are not equal.
+type EvalCompareDiff struct {
+ Info *InstanceInfo
+ One, Two **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {
+ one, two := *n.One, *n.Two
+
+ // If either are nil, let them be empty
+ if one == nil {
+ one = new(InstanceDiff)
+ one.init()
+ }
+ if two == nil {
+ two = new(InstanceDiff)
+ two.init()
+ }
+ oneId, _ := one.GetAttribute("id")
+ twoId, _ := two.GetAttribute("id")
+ one.DelAttribute("id")
+ two.DelAttribute("id")
+ defer func() {
+ if oneId != nil {
+ one.SetAttribute("id", oneId)
+ }
+ if twoId != nil {
+ two.SetAttribute("id", twoId)
+ }
+ }()
+
+ if same, reason := one.Same(two); !same {
+ log.Printf("[ERROR] %s: diffs didn't match", n.Info.Id)
+ log.Printf("[ERROR] %s: reason: %s", n.Info.Id, reason)
+ log.Printf("[ERROR] %s: diff one: %#v", n.Info.Id, one)
+ log.Printf("[ERROR] %s: diff two: %#v", n.Info.Id, two)
+ return nil, fmt.Errorf(
+ "%s: diffs didn't match during apply. This is a bug with "+
+ "Terraform and should be reported as a GitHub Issue.\n"+
+ "\n"+
+ "Please include the following information in your report:\n"+
+ "\n"+
+ " Terraform Version: %s\n"+
+ " Resource ID: %s\n"+
+ " Mismatch reason: %s\n"+
+ " Diff One (usually from plan): %#v\n"+
+ " Diff Two (usually from apply): %#v\n"+
+ "\n"+
+ "Also include as much context as you can about your config, state, "+
+ "and the steps you performed to trigger this error.\n",
+ n.Info.Id, Version, n.Info.Id, reason, one, two)
+ }
+
+ return nil, nil
+}
+
+// EvalDiff is an EvalNode implementation that computes a diff for
+// a resource.
+type EvalDiff struct {
+ Name string
+ Info *InstanceInfo
+ Config **ResourceConfig
+ Provider *ResourceProvider
+ Diff **InstanceDiff
+ State **InstanceState
+ OutputDiff **InstanceDiff
+ OutputState **InstanceState
+
+ // Resource is needed to fetch the ignore_changes list so we can
+ // filter user-requested ignored attributes from the diff.
+ Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
+ state := *n.State
+ config := *n.Config
+ provider := *n.Provider
+
+ // Call pre-diff hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreDiff(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // The state for the diff must never be nil
+ diffState := state
+ if diffState == nil {
+ diffState = new(InstanceState)
+ }
+ diffState.init()
+
+ // Diff!
+ diff, err := provider.Diff(n.Info, diffState, config)
+ if err != nil {
+ return nil, err
+ }
+ if diff == nil {
+ diff = new(InstanceDiff)
+ }
+
+ // Set DestroyDeposed if we have deposed instances
+ _, err = readInstanceFromState(ctx, n.Name, nil, func(rs *ResourceState) (*InstanceState, error) {
+ if len(rs.Deposed) > 0 {
+ diff.DestroyDeposed = true
+ }
+
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Preserve the DestroyTainted flag
+ if n.Diff != nil {
+ diff.SetTainted((*n.Diff).GetDestroyTainted())
+ }
+
+ // Require a destroy if there is an ID and it requires new.
+ if diff.RequiresNew() && state != nil && state.ID != "" {
+ diff.SetDestroy(true)
+ }
+
+ // If we're creating a new resource, compute its ID
+ if diff.RequiresNew() || state == nil || state.ID == "" {
+ var oldID string
+ if state != nil {
+ oldID = state.Attributes["id"]
+ }
+
+ // Add diff to compute new ID
+ diff.init()
+ diff.SetAttribute("id", &ResourceAttrDiff{
+ Old: oldID,
+ NewComputed: true,
+ RequiresNew: true,
+ Type: DiffAttrOutput,
+ })
+ }
+
+ // filter out ignored resources
+ if err := n.processIgnoreChanges(diff); err != nil {
+ return nil, err
+ }
+
+ // Call post-diff hook
+ err = ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostDiff(n.Info, diff)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Update our output
+ *n.OutputDiff = diff
+
+ // Update the state if we care
+ if n.OutputState != nil {
+ *n.OutputState = state
+
+ // Merge our state so that the state is updated with our plan
+ if !diff.Empty() && n.OutputState != nil {
+ *n.OutputState = state.MergeDiff(diff)
+ }
+ }
+
+ return nil, nil
+}
+
+func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
+ if diff == nil || n.Resource == nil || n.Resource.Id() == "" {
+ return nil
+ }
+ ignoreChanges := n.Resource.Lifecycle.IgnoreChanges
+
+ if len(ignoreChanges) == 0 {
+ return nil
+ }
+
+ // If we're just creating the resource, we shouldn't alter the
+ // Diff at all
+ if diff.ChangeType() == DiffCreate {
+ return nil
+ }
+
+ // If the resource has been tainted then we don't process ignore changes
+ // since we MUST recreate the entire resource.
+ if diff.GetDestroyTainted() {
+ return nil
+ }
+
+ attrs := diff.CopyAttributes()
+
+ // get the complete set of keys we want to ignore
+ ignorableAttrKeys := make(map[string]bool)
+ for _, ignoredKey := range ignoreChanges {
+ for k := range attrs {
+ if ignoredKey == "*" || strings.HasPrefix(k, ignoredKey) {
+ ignorableAttrKeys[k] = true
+ }
+ }
+ }
+
+ // If the resource was being destroyed, check to see if we can ignore the
+ // reason for it being destroyed.
+ if diff.GetDestroy() {
+ for k, v := range attrs {
+ if k == "id" {
+ // id will always be changed if we intended to replace this instance
+ continue
+ }
+ if v.Empty() || v.NewComputed {
+ continue
+ }
+
+ // If any RequiresNew attribute isn't ignored, we need to keep the diff
+ // as-is to be able to replace the resource.
+ if v.RequiresNew && !ignorableAttrKeys[k] {
+ return nil
+ }
+ }
+
+ // Now that we know that we aren't replacing the instance, we can filter
+ // out all the empty and computed attributes. There may be a bunch of
+ // extraneous attribute diffs for the other non-requires-new attributes
+ // going from "" -> "configval" or "" -> "<computed>".
+ // We must make sure any flatmapped containers are filtered (or not) as a
+ // whole.
+ containers := groupContainers(diff)
+ keep := map[string]bool{}
+ for _, v := range containers {
+ if v.keepDiff() {
+ // At least one key has changes, so list all the sibling keys
+ // to keep in the diff.
+ for k := range v {
+ keep[k] = true
+ }
+ }
+ }
+
+ for k, v := range attrs {
+ if (v.Empty() || v.NewComputed) && !keep[k] {
+ ignorableAttrKeys[k] = true
+ }
+ }
+ }
+
+ // Here we undo the two reactions to RequiresNew in EvalDiff - the "id"
+ // attribute diff and the Destroy boolean field
+ log.Printf("[DEBUG] Removing 'id' diff and setting Destroy to false " +
+ "because after ignore_changes, this diff no longer requires replacement")
+ diff.DelAttribute("id")
+ diff.SetDestroy(false)
+
+ // If we didn't hit any of our early exit conditions, we can filter the diff.
+ for k := range ignorableAttrKeys {
+ log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s",
+ n.Resource.Id(), k)
+ diff.DelAttribute(k)
+ }
+
+ return nil
+}
+
+// a group of key-*ResourceAttrDiff pairs from the same flatmapped container
+type flatAttrDiff map[string]*ResourceAttrDiff
+
+// we need to keep all keys if any of them have a diff
+func (f flatAttrDiff) keepDiff() bool {
+ for _, v := range f {
+ if !v.Empty() && !v.NewComputed {
+ return true
+ }
+ }
+ return false
+}
+
+// sets, lists and maps need to be compared for diff inclusion as a whole, so
+// group the flatmapped keys together for easier comparison.
+func groupContainers(d *InstanceDiff) map[string]flatAttrDiff {
+ isIndex := multiVal.MatchString
+ containers := map[string]flatAttrDiff{}
+ attrs := d.CopyAttributes()
+ // we need to loop once to find the index key
+ for k := range attrs {
+ if isIndex(k) {
+ // add the key, always including the final dot to fully qualify it
+ containers[k[:len(k)-1]] = flatAttrDiff{}
+ }
+ }
+
+ // loop again to find all the sub keys
+ for prefix, values := range containers {
+ for k, attrDiff := range attrs {
+ // we include the index value as well, since it could be part of the diff
+ if strings.HasPrefix(k, prefix) {
+ values[k] = attrDiff
+ }
+ }
+ }
+
+ return containers
+}
+
+// EvalDiffDestroy is an EvalNode implementation that returns a plain
+// destroy diff.
+type EvalDiffDestroy struct {
+ Info *InstanceInfo
+ State **InstanceState
+ Output **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
+ state := *n.State
+
+ // If there is no state or we don't have an ID, we're already destroyed
+ if state == nil || state.ID == "" {
+ return nil, nil
+ }
+
+ // Call pre-diff hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreDiff(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // The diff
+ diff := &InstanceDiff{Destroy: true}
+
+ // Call post-diff hook
+ err = ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostDiff(n.Info, diff)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Update our output
+ *n.Output = diff
+
+ return nil, nil
+}
+
+// EvalDiffDestroyModule is an EvalNode implementation that marks a module
+// as destroyed in the full diff.
+type EvalDiffDestroyModule struct {
+ Path []string
+}
+
+// TODO: test
+func (n *EvalDiffDestroyModule) Eval(ctx EvalContext) (interface{}, error) {
+ diff, lock := ctx.Diff()
+
+ // Acquire the lock so that we can do this safely concurrently
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Write the diff
+ modDiff := diff.ModuleByPath(n.Path)
+ if modDiff == nil {
+ modDiff = diff.AddModule(n.Path)
+ }
+ modDiff.Destroy = true
+
+ return nil, nil
+}
+
+// EvalFilterDiff is an EvalNode implementation that filters the diff
+// according to some filter.
+type EvalFilterDiff struct {
+ // Input and output
+ Diff **InstanceDiff
+ Output **InstanceDiff
+
+ // Destroy, if true, filters the output down to just the destroy
+ // flag, set only when the input diff destroys or replaces the resource.
+ Destroy bool
+}
+
+func (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) {
+ if *n.Diff == nil {
+ return nil, nil
+ }
+
+ input := *n.Diff
+ result := new(InstanceDiff)
+
+ if n.Destroy {
+ if input.GetDestroy() || input.RequiresNew() {
+ result.SetDestroy(true)
+ }
+ }
+
+ if n.Output != nil {
+ *n.Output = result
+ }
+
+ return nil, nil
+}
+
+// EvalReadDiff is an EvalNode implementation that reads a resource's diff
+// from the full diff.
+type EvalReadDiff struct {
+ Name string
+ Diff **InstanceDiff
+}
+
+func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) {
+ diff, lock := ctx.Diff()
+
+ // Acquire the lock so that we can do this safely concurrently
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Write the diff
+ modDiff := diff.ModuleByPath(ctx.Path())
+ if modDiff == nil {
+ return nil, nil
+ }
+
+ *n.Diff = modDiff.Resources[n.Name]
+
+ return nil, nil
+}
+
+// EvalWriteDiff is an EvalNode implementation that writes a resource's diff
+// into the full diff.
+type EvalWriteDiff struct {
+ Name string
+ Diff **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) {
+ diff, lock := ctx.Diff()
+
+ // The diff to write; if it's empty we write nil instead
+ var diffVal *InstanceDiff
+ if n.Diff != nil {
+ diffVal = *n.Diff
+ }
+ if diffVal.Empty() {
+ diffVal = nil
+ }
+
+ // Acquire the lock so that we can do this safely concurrently
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Write the diff
+ modDiff := diff.ModuleByPath(ctx.Path())
+ if modDiff == nil {
+ modDiff = diff.AddModule(ctx.Path())
+ }
+ if diffVal != nil {
+ modDiff.Resources[n.Name] = diffVal
+ } else {
+ delete(modDiff.Resources, n.Name)
+ }
+
+ return nil, nil
+}
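
groupContainers relies on flatmap conventions: a set, list, or map is stored as an index key (such as "tags.%" or "ports.#") plus element keys sharing its prefix, and the whole group must be kept or dropped together. A standalone sketch of the grouping; the ".%"/".#" suffix check stands in for the multiVal regexp:

package main

import (
	"fmt"
	"strings"
)

// groupByContainer collects flatmapped attribute keys under their
// container prefix, so a container can be kept or dropped as a whole.
func groupByContainer(keys []string) map[string][]string {
	containers := map[string][]string{}
	for _, k := range keys {
		if strings.HasSuffix(k, ".%") || strings.HasSuffix(k, ".#") {
			// Keep the trailing dot so the prefix is fully qualified.
			containers[k[:len(k)-1]] = nil
		}
	}
	for prefix := range containers {
		for _, k := range keys {
			if strings.HasPrefix(k, prefix) {
				containers[prefix] = append(containers[prefix], k)
			}
		}
	}
	return containers
}

func main() {
	fmt.Println(groupByContainer([]string{"tags.%", "tags.env", "ami"}))
}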
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_error.go b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go
new file mode 100644
index 00000000..470f798b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go
@@ -0,0 +1,20 @@
+package terraform
+
+// EvalReturnError is an EvalNode implementation that returns an
+// error if it is present.
+//
+// This is useful for scenarios where an error has been captured by
+// another EvalNode (like EvalApply) for special EvalTree-based error
+// handling, and that handling has completed, so the error should be
+// returned normally.
+type EvalReturnError struct {
+ Error *error
+}
+
+func (n *EvalReturnError) Eval(ctx EvalContext) (interface{}, error) {
+ if n.Error == nil {
+ return nil, nil
+ }
+
+ return nil, *n.Error
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
new file mode 100644
index 00000000..711c625c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
@@ -0,0 +1,25 @@
+package terraform
+
+// EvalNodeFilterFunc is the callback used to replace a node with
+// another node. To skip the replacement, just return the input node.
+type EvalNodeFilterFunc func(EvalNode) EvalNode
+
+// EvalNodeFilterable is an interface that can be implemented by
+// EvalNodes to allow filtering of sub-elements. Note that this isn't
+// a common thing to implement and you probably don't need it.
+type EvalNodeFilterable interface {
+ EvalNode
+ Filter(EvalNodeFilterFunc)
+}
+
+// EvalFilter runs the filter on the given node and returns the
+// final filtered value. This should be called rather than checking
+// the EvalNode directly since this will properly handle EvalNodeFilterables.
+func EvalFilter(node EvalNode, fn EvalNodeFilterFunc) EvalNode {
+ if f, ok := node.(EvalNodeFilterable); ok {
+ f.Filter(fn)
+ return node
+ }
+
+ return fn(node)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
new file mode 100644
index 00000000..1a55f024
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
@@ -0,0 +1,49 @@
+package terraform
+
+// EvalNodeOpFilterable is an interface that EvalNodes can implement
+// to be filterable by the operation that is being run on Terraform.
+type EvalNodeOpFilterable interface {
+ IncludeInOp(walkOperation) bool
+}
+
+// EvalNodeFilterOp returns a filter function that filters nodes that
+// include themselves in specific operations.
+func EvalNodeFilterOp(op walkOperation) EvalNodeFilterFunc {
+ return func(n EvalNode) EvalNode {
+ include := true
+ if of, ok := n.(EvalNodeOpFilterable); ok {
+ include = of.IncludeInOp(op)
+ }
+ if include {
+ return n
+ }
+
+ return EvalNoop{}
+ }
+}
+
+// EvalOpFilter is an EvalNode implementation that is a proxy to
+// another node but filters based on the operation.
+type EvalOpFilter struct {
+ // Ops is the list of operations to include this node in.
+ Ops []walkOperation
+
+ // Node is the node to execute
+ Node EvalNode
+}
+
+// TODO: test
+func (n *EvalOpFilter) Eval(ctx EvalContext) (interface{}, error) {
+ return EvalRaw(n.Node, ctx)
+}
+
+// EvalNodeOpFilterable impl.
+func (n *EvalOpFilter) IncludeInOp(op walkOperation) bool {
+ for _, v := range n.Ops {
+ if v == op {
+ return true
+ }
+ }
+
+ return false
+}
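
The op filter swaps a node for a no-op whenever the current walk operation isn't in its Ops list. A standalone sketch of that substitution, with invented walk constants and a toy node interface:

package main

import "fmt"

type walkOp int

const (
	walkPlan walkOp = iota
	walkApply
)

type node interface{ Name() string }

type namedNode string

func (n namedNode) Name() string { return string(n) }

type noop struct{}

func (noop) Name() string { return "noop" }

// filterForOp keeps the node only when op is in its allowed set,
// mirroring EvalNodeFilterOp plus EvalOpFilter.IncludeInOp.
func filterForOp(n node, allowed []walkOp, op walkOp) node {
	for _, v := range allowed {
		if v == op {
			return n
		}
	}
	return noop{}
}

func main() {
	n := namedNode("configure-provider")
	fmt.Println(filterForOp(n, []walkOp{walkApply}, walkPlan).Name()) // noop
	fmt.Println(filterForOp(n, []walkOp{walkApply}, walkApply).Name())
}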
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_if.go b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
new file mode 100644
index 00000000..d6b46a1f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
@@ -0,0 +1,26 @@
+package terraform
+
+// EvalIf is an EvalNode that is a conditional.
+type EvalIf struct {
+ If func(EvalContext) (bool, error)
+ Then EvalNode
+ Else EvalNode
+}
+
+// TODO: test
+func (n *EvalIf) Eval(ctx EvalContext) (interface{}, error) {
+ yes, err := n.If(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if yes {
+ return EvalRaw(n.Then, ctx)
+ } else {
+ if n.Else != nil {
+ return EvalRaw(n.Else, ctx)
+ }
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
new file mode 100644
index 00000000..62cc581f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
@@ -0,0 +1,76 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// EvalImportState is an EvalNode implementation that performs an
+// ImportState operation on a provider. This will return the imported
+// states but won't modify any actual state.
+type EvalImportState struct {
+ Provider *ResourceProvider
+ Info *InstanceInfo
+ Id string
+ Output *[]*InstanceState
+}
+
+// TODO: test
+func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) {
+ provider := *n.Provider
+
+ {
+ // Call pre-import hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreImportState(n.Info, n.Id)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Import!
+ state, err := provider.ImportState(n.Info, n.Id)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "import %s (id: %s): %s", n.Info.HumanId(), n.Id, err)
+ }
+
+ if n.Output != nil {
+ *n.Output = state
+ }
+
+ {
+ // Call post-import hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostImportState(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+// EvalImportStateVerify verifies the state after ImportState and
+// after the refresh to make sure it is non-nil and valid.
+type EvalImportStateVerify struct {
+ Info *InstanceInfo
+ Id string
+ State **InstanceState
+}
+
+// TODO: test
+func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) {
+ state := *n.State
+ if state.Empty() {
+ return nil, fmt.Errorf(
+ "import %s (id: %s): Terraform detected a resource with this ID doesn't\n"+
+ "exist. Please verify the ID is correct. You cannot import non-existent\n"+
+ "resources using Terraform import.",
+ n.Info.HumanId(),
+ n.Id)
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
new file mode 100644
index 00000000..6825ff59
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
@@ -0,0 +1,24 @@
+package terraform
+
+import "github.com/hashicorp/terraform/config"
+
+// EvalInterpolate is an EvalNode implementation that takes a raw
+// configuration and interpolates it.
+type EvalInterpolate struct {
+ Config *config.RawConfig
+ Resource *Resource
+ Output **ResourceConfig
+}
+
+func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) {
+ rc, err := ctx.Interpolate(n.Config, n.Resource)
+ if err != nil {
+ return nil, err
+ }
+
+ if n.Output != nil {
+ *n.Output = rc
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go
new file mode 100644
index 00000000..f4bc8225
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go
@@ -0,0 +1,8 @@
+package terraform
+
+// EvalNoop is an EvalNode that does nothing.
+type EvalNoop struct{}
+
+func (EvalNoop) Eval(EvalContext) (interface{}, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
new file mode 100644
index 00000000..cf61781e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
@@ -0,0 +1,119 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalDeleteOutput is an EvalNode implementation that deletes an output
+// from the state.
+type EvalDeleteOutput struct {
+ Name string
+}
+
+// TODO: test
+func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) {
+ state, lock := ctx.State()
+ if state == nil {
+ return nil, nil
+ }
+
+ // Get a write lock so we can access this instance
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, there's nothing to delete.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ delete(mod.Outputs, n.Name)
+
+ return nil, nil
+}
+
+// EvalWriteOutput is an EvalNode implementation that writes the output
+// for the given name to the current state.
+type EvalWriteOutput struct {
+ Name string
+ Sensitive bool
+ Value *config.RawConfig
+}
+
+// TODO: test
+func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
+ cfg, err := ctx.Interpolate(n.Value, nil)
+ if err != nil {
+ // Log error but continue anyway
+ log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err)
+ }
+
+ state, lock := ctx.State()
+ if state == nil {
+ return nil, fmt.Errorf("cannot write state to nil state")
+ }
+
+ // Get a write lock so we can access this instance
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, create it.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ mod = state.AddModule(ctx.Path())
+ }
+
+ // Get the value from the config
+ var valueRaw interface{} = config.UnknownVariableValue
+ if cfg != nil {
+ var ok bool
+ valueRaw, ok = cfg.Get("value")
+ if !ok {
+ valueRaw = ""
+ }
+ if cfg.IsComputed("value") {
+ valueRaw = config.UnknownVariableValue
+ }
+ }
+
+ switch valueTyped := valueRaw.(type) {
+ case string:
+ mod.Outputs[n.Name] = &OutputState{
+ Type: "string",
+ Sensitive: n.Sensitive,
+ Value: valueTyped,
+ }
+ case []interface{}:
+ mod.Outputs[n.Name] = &OutputState{
+ Type: "list",
+ Sensitive: n.Sensitive,
+ Value: valueTyped,
+ }
+ case map[string]interface{}:
+ mod.Outputs[n.Name] = &OutputState{
+ Type: "map",
+ Sensitive: n.Sensitive,
+ Value: valueTyped,
+ }
+ case []map[string]interface{}:
+ // an HCL map is multi-valued, so if this was read out of a config the
+ // map may still be in a slice.
+ if len(valueTyped) == 1 {
+ mod.Outputs[n.Name] = &OutputState{
+ Type: "map",
+ Sensitive: n.Sensitive,
+ Value: valueTyped[0],
+ }
+ break
+ }
+ return nil, fmt.Errorf("output %s type (%T) with %d values not valid for type map",
+ n.Name, valueTyped, len(valueTyped))
+ default:
+ return nil, fmt.Errorf("output %s is not a valid type (%T)\n", n.Name, valueTyped)
+ }
+
+ return nil, nil
+}
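
EvalWriteOutput's type switch classifies an interpolated value into one of the three output types, unwrapping the single-element slice-of-maps form that HCL can produce. A standalone sketch of that classification:

package main

import "fmt"

// classifyOutput returns the output type name and the (possibly
// unwrapped) value, following EvalWriteOutput's type switch.
func classifyOutput(v interface{}) (string, interface{}, error) {
	switch vt := v.(type) {
	case string:
		return "string", vt, nil
	case []interface{}:
		return "list", vt, nil
	case map[string]interface{}:
		return "map", vt, nil
	case []map[string]interface{}:
		// HCL can wrap a single map in a slice; unwrap that case.
		if len(vt) == 1 {
			return "map", vt[0], nil
		}
		return "", nil, fmt.Errorf("%d values not valid for type map", len(vt))
	default:
		return "", nil, fmt.Errorf("not a valid output type (%T)", vt)
	}
}

func main() {
	t, v, _ := classifyOutput([]map[string]interface{}{{"a": "b"}})
	fmt.Println(t, v)
}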
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
new file mode 100644
index 00000000..092fd18d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
@@ -0,0 +1,164 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalSetProviderConfig sets the parent configuration for a provider
+// without configuring that provider, validating it, etc.
+type EvalSetProviderConfig struct {
+ Provider string
+ Config **ResourceConfig
+}
+
+func (n *EvalSetProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
+ return nil, ctx.SetProviderConfig(n.Provider, *n.Config)
+}
+
+// EvalBuildProviderConfig outputs a *ResourceConfig that is properly
+// merged with parents and inputs on top of what is configured in the file.
+type EvalBuildProviderConfig struct {
+ Provider string
+ Config **ResourceConfig
+ Output **ResourceConfig
+}
+
+func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
+ cfg := *n.Config
+
+ // If we have a configuration set, then merge that in
+ if input := ctx.ProviderInput(n.Provider); input != nil {
+ // "input" is a map of the subset of config values that were known
+ // during the input walk, set by EvalInputProvider. Note that
+ // in particular it does *not* include attributes that had
+ // computed values at input time; those appear *only* in
+ // "cfg" here.
+ rc, err := config.NewRawConfig(input)
+ if err != nil {
+ return nil, err
+ }
+
+ merged := cfg.raw.Merge(rc)
+ cfg = NewResourceConfig(merged)
+ }
+
+ // Get the parent configuration if there is one
+ if parent := ctx.ParentProviderConfig(n.Provider); parent != nil {
+ merged := cfg.raw.Merge(parent.raw)
+ cfg = NewResourceConfig(merged)
+ }
+
+ *n.Output = cfg
+ return nil, nil
+}
+
+// EvalConfigProvider is an EvalNode implementation that configures
+// a provider that is already initialized and retrieved.
+type EvalConfigProvider struct {
+ Provider string
+ Config **ResourceConfig
+}
+
+func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
+ return nil, ctx.ConfigureProvider(n.Provider, *n.Config)
+}
+
+// EvalInitProvider is an EvalNode implementation that initializes a provider
+// and returns nothing. The provider can be retrieved again with the
+// EvalGetProvider node.
+type EvalInitProvider struct {
+ Name string
+}
+
+func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) {
+ return ctx.InitProvider(n.Name)
+}
+
+// EvalCloseProvider is an EvalNode implementation that closes provider
+// connections that aren't needed anymore.
+type EvalCloseProvider struct {
+ Name string
+}
+
+func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) {
+ ctx.CloseProvider(n.Name)
+ return nil, nil
+}
+
+// EvalGetProvider is an EvalNode implementation that retrieves an already
+// initialized provider instance for the given name.
+type EvalGetProvider struct {
+ Name string
+ Output *ResourceProvider
+}
+
+func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) {
+ result := ctx.Provider(n.Name)
+ if result == nil {
+ return nil, fmt.Errorf("provider %s not initialized", n.Name)
+ }
+
+ if n.Output != nil {
+ *n.Output = result
+ }
+
+ return nil, nil
+}
+
+// EvalInputProvider is an EvalNode implementation that asks for input
+// for the given provider configurations.
+type EvalInputProvider struct {
+ Name string
+ Provider *ResourceProvider
+ Config **ResourceConfig
+}
+
+func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) {
+ // If we already configured this provider, then don't do this again
+ if v := ctx.ProviderInput(n.Name); v != nil {
+ return nil, nil
+ }
+
+ rc := *n.Config
+
+ // Wrap the input into a namespace
+ input := &PrefixUIInput{
+ IdPrefix: fmt.Sprintf("provider.%s", n.Name),
+ QueryPrefix: fmt.Sprintf("provider.%s.", n.Name),
+ UIInput: ctx.Input(),
+ }
+
+ // Capture the input necessary to satisfy the provider's configuration.
+ config, err := (*n.Provider).Input(input, rc)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error configuring %s: %s", n.Name, err)
+ }
+
+ // Set the input that we received so that child modules don't attempt
+ // to ask for input again.
+ if config != nil && len(config.Config) > 0 {
+ // This repository of provider input results on the context doesn't
+ // retain config.ComputedKeys, so we need to filter those out here
+ // in order that later users of this data won't try to use the unknown
+ // value placeholder as if it were a literal value. This map is just
+ // of known values we've been able to complete so far; dynamic stuff
+ // will be merged in by EvalBuildProviderConfig on subsequent
+ // (post-input) walks.
+ confMap := config.Config
+ if config.ComputedKeys != nil {
+ for _, key := range config.ComputedKeys {
+ delete(confMap, key)
+ }
+ }
+
+ ctx.SetProviderInput(n.Name, confMap)
+ } else {
+ ctx.SetProviderInput(n.Name, map[string]interface{}{})
+ }
+
+ return nil, nil
+}
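
EvalBuildProviderConfig layers three sources: the file config, then saved user input, then the parent module's config, with each later merge winning for overlapping keys. A standalone sketch using plain maps; RawConfig.Merge semantics are only approximated here, so treat the precedence as illustrative:

package main

import "fmt"

// merge returns a copy of base with override's keys layered on top,
// approximating RawConfig.Merge (the argument wins on conflicts).
func merge(base, override map[string]string) map[string]string {
	out := make(map[string]string, len(base)+len(override))
	for k, v := range base {
		out[k] = v
	}
	for k, v := range override {
		out[k] = v
	}
	return out
}

func main() {
	fileCfg := map[string]string{"region": "us-east-1", "profile": "dev"}
	userInput := map[string]string{"region": "us-west-2"}
	parentCfg := map[string]string{"profile": "prod"}

	// Same layering order as EvalBuildProviderConfig:
	// file config, then saved input, then the parent's config.
	cfg := merge(merge(fileCfg, userInput), parentCfg)
	fmt.Println(cfg) // map[profile:prod region:us-west-2]
}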
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
new file mode 100644
index 00000000..89579c05
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
@@ -0,0 +1,47 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner
+// and returns nothing. The provisioner can be retrieved again with the
+// EvalGetProvisioner node.
+type EvalInitProvisioner struct {
+ Name string
+}
+
+func (n *EvalInitProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+ return ctx.InitProvisioner(n.Name)
+}
+
+// EvalCloseProvisioner is an EvalNode implementation that closes provisioner
+// connections that aren't needed anymore.
+type EvalCloseProvisioner struct {
+ Name string
+}
+
+func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+ ctx.CloseProvisioner(n.Name)
+ return nil, nil
+}
+
+// EvalGetProvisioner is an EvalNode implementation that retrieves an already
+// initialized provisioner instance for the given name.
+type EvalGetProvisioner struct {
+ Name string
+ Output *ResourceProvisioner
+}
+
+func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+ result := ctx.Provisioner(n.Name)
+ if result == nil {
+ return nil, fmt.Errorf("provisioner %s not initialized", n.Name)
+ }
+
+ if n.Output != nil {
+ *n.Output = result
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
new file mode 100644
index 00000000..fb85a284
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
@@ -0,0 +1,139 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// EvalReadDataDiff is an EvalNode implementation that executes a data
+// resource's ReadDataDiff method to discover what attributes it exports.
+type EvalReadDataDiff struct {
+ Provider *ResourceProvider
+ Output **InstanceDiff
+ OutputState **InstanceState
+ Config **ResourceConfig
+ Info *InstanceInfo
+
+ // Set Previous when re-evaluating diff during apply, to ensure that
+ // the "Destroy" flag is preserved.
+ Previous **InstanceDiff
+}
+
+func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) {
+ // TODO: test
+
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreDiff(n.Info, nil)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ var diff *InstanceDiff
+
+ if n.Previous != nil && *n.Previous != nil && (*n.Previous).GetDestroy() {
+ // If we're re-diffing for a diff that was already planning to
+ // destroy, then we'll just continue with that plan.
+ diff = &InstanceDiff{Destroy: true}
+ } else {
+ provider := *n.Provider
+ config := *n.Config
+
+ var err error
+ diff, err = provider.ReadDataDiff(n.Info, config)
+ if err != nil {
+ return nil, err
+ }
+ if diff == nil {
+ diff = new(InstanceDiff)
+ }
+
+ // if id isn't explicitly set then it's always computed, because we're
+ // always "creating a new resource".
+ diff.init()
+ if _, ok := diff.Attributes["id"]; !ok {
+ diff.SetAttribute("id", &ResourceAttrDiff{
+ Old: "",
+ NewComputed: true,
+ RequiresNew: true,
+ Type: DiffAttrOutput,
+ })
+ }
+ }
+
+ err = ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostDiff(n.Info, diff)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ *n.Output = diff
+
+ if n.OutputState != nil {
+ state := &InstanceState{}
+ *n.OutputState = state
+
+ // Apply the diff to the returned state, so the state includes
+ // any attribute values that are not computed.
+		if !diff.Empty() {
+ *n.OutputState = state.MergeDiff(diff)
+ }
+ }
+
+ return nil, nil
+}
+
+// EvalReadDataApply is an EvalNode implementation that executes a data
+// resource's ReadDataApply method to read data from the data source.
+type EvalReadDataApply struct {
+ Provider *ResourceProvider
+ Output **InstanceState
+ Diff **InstanceDiff
+ Info *InstanceInfo
+}
+
+func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) {
+ // TODO: test
+ provider := *n.Provider
+ diff := *n.Diff
+
+ // If the diff is for *destroying* this resource then we'll
+ // just drop its state and move on, since data resources don't
+ // support an actual "destroy" action.
+ if diff != nil && diff.GetDestroy() {
+ if n.Output != nil {
+ *n.Output = nil
+ }
+ return nil, nil
+ }
+
+ // For the purpose of external hooks we present a data apply as a
+ // "Refresh" rather than an "Apply" because creating a data source
+ // is presented to users/callers as a "read" operation.
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ // We don't have a state yet, so we'll just give the hook an
+ // empty one to work with.
+ return h.PreRefresh(n.Info, &InstanceState{})
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ state, err := provider.ReadDataApply(n.Info, diff)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %s", n.Info.Id, err)
+ }
+
+ err = ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostRefresh(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if n.Output != nil {
+ *n.Output = state
+ }
+
+ return nil, nil
+}
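
The one subtle step in EvalReadDataDiff is forcing "id" to be computed whenever the provider didn't set it, since reading a data source always behaves like creating a fresh resource. A condensed sketch of that defaulting, with simplified stand-ins for Terraform's diff types:

package main

import "fmt"

type AttrDiff struct {
	Old         string
	NewComputed bool
	RequiresNew bool
}

type Diff struct {
	Attributes map[string]*AttrDiff
}

// ensureComputedID marks "id" as computed unless the provider already
// produced a diff entry for it.
func ensureComputedID(d *Diff) {
	if d.Attributes == nil {
		d.Attributes = map[string]*AttrDiff{}
	}
	if _, ok := d.Attributes["id"]; !ok {
		d.Attributes["id"] = &AttrDiff{NewComputed: true, RequiresNew: true}
	}
}

func main() {
	d := &Diff{} // provider returned an empty diff
	ensureComputedID(d)
	fmt.Printf("%+v\n", d.Attributes["id"]) // &{Old: NewComputed:true RequiresNew:true}
}
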
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
new file mode 100644
index 00000000..fa2b8126
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
@@ -0,0 +1,55 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+)
+
+// EvalRefresh is an EvalNode implementation that does a refresh for
+// a resource.
+type EvalRefresh struct {
+ Provider *ResourceProvider
+ State **InstanceState
+ Info *InstanceInfo
+ Output **InstanceState
+}
+
+// TODO: test
+func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) {
+ provider := *n.Provider
+ state := *n.State
+
+ // If we have no state, we don't do any refreshing
+ if state == nil {
+ log.Printf("[DEBUG] refresh: %s: no state, not refreshing", n.Info.Id)
+ return nil, nil
+ }
+
+ // Call pre-refresh hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreRefresh(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Refresh!
+ state, err = provider.Refresh(n.Info, state)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %s", n.Info.Id, err.Error())
+ }
+
+ // Call post-refresh hook
+ err = ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostRefresh(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if n.Output != nil {
+ *n.Output = state
+ }
+
+ return nil, nil
+}
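
EvalRefresh, like most nodes in this file, brackets its real work between a pre-hook and a post-hook and aborts on the first error from any of the three. A minimal sketch of that hook-sandwich control flow, with hypothetical types:

package main

import (
	"errors"
	"fmt"
)

type hook func(stage string) error

// withHooks runs pre-hook, operation, post-hook in order, stopping at the
// first error, mirroring PreRefresh / Refresh / PostRefresh above.
func withHooks(h hook, op func() error) error {
	if err := h("pre"); err != nil {
		return err
	}
	if err := op(); err != nil {
		return err
	}
	return h("post")
}

func main() {
	logHook := func(stage string) error {
		fmt.Println("hook:", stage)
		return nil
	}
	err := withHooks(logHook, func() error {
		fmt.Println("refreshing...")
		return nil
	})
	fmt.Println("err:", err)

	// A failing pre-hook short-circuits the operation entirely.
	failing := func(stage string) error { return errors.New(stage + " hook failed") }
	fmt.Println(withHooks(failing, func() error { return nil }))
}
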
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
new file mode 100644
index 00000000..5eca6782
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
@@ -0,0 +1,13 @@
+package terraform
+
+// EvalInstanceInfo is an EvalNode implementation that fills in the
+// InstanceInfo as much as it can.
+type EvalInstanceInfo struct {
+ Info *InstanceInfo
+}
+
+// TODO: test
+func (n *EvalInstanceInfo) Eval(ctx EvalContext) (interface{}, error) {
+ n.Info.ModulePath = ctx.Path()
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
new file mode 100644
index 00000000..82d81782
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
@@ -0,0 +1,27 @@
+package terraform
+
+// EvalSequence is an EvalNode that evaluates in sequence.
+type EvalSequence struct {
+ Nodes []EvalNode
+}
+
+func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) {
+ for _, n := range n.Nodes {
+ if n == nil {
+ continue
+ }
+
+ if _, err := EvalRaw(n, ctx); err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+// EvalNodeFilterable impl.
+func (n *EvalSequence) Filter(fn EvalNodeFilterFunc) {
+ for i, node := range n.Nodes {
+ n.Nodes[i] = fn(node)
+ }
+}
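
Because Eval skips nil children and Filter rewrites children in place, a filter function can prune a node from a sequence simply by returning nil for it. A small sketch of that interaction with hypothetical node types:

package main

import "fmt"

type node interface{ run() }

type printNode struct{ msg string }

func (p *printNode) run() { fmt.Println(p.msg) }

func main() {
	nodes := []node{&printNode{"keep"}, &printNode{"drop"}}

	// A filter that prunes nodes by replacing them with nil.
	for i, n := range nodes {
		if p, ok := n.(*printNode); ok && p.msg == "drop" {
			nodes[i] = nil
		}
	}

	// The sequence skips nil entries, as EvalSequence.Eval does.
	for _, n := range nodes {
		if n == nil {
			continue
		}
		n.run() // prints only "keep"
	}
}
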
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
new file mode 100644
index 00000000..126a0e63
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
@@ -0,0 +1,324 @@
+package terraform
+
+import "fmt"
+
+// EvalReadState is an EvalNode implementation that reads the
+// primary InstanceState for a specific resource out of the state.
+type EvalReadState struct {
+ Name string
+ Output **InstanceState
+}
+
+func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) {
+ return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) {
+ return rs.Primary, nil
+ })
+}
+
+// EvalReadStateDeposed is an EvalNode implementation that reads the
+// deposed InstanceState for a specific resource out of the state
+type EvalReadStateDeposed struct {
+ Name string
+ Output **InstanceState
+ // Index indicates which instance in the Deposed list to target, or -1 for
+ // the last item.
+ Index int
+}
+
+func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
+ return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) {
+ // Get the index. If it is negative, then we get the last one
+ idx := n.Index
+ if idx < 0 {
+ idx = len(rs.Deposed) - 1
+ }
+ if idx >= 0 && idx < len(rs.Deposed) {
+ return rs.Deposed[idx], nil
+ } else {
+ return nil, fmt.Errorf("bad deposed index: %d, for resource: %#v", idx, rs)
+ }
+ })
+}
+
+// Does the bulk of the work for the various flavors of ReadState eval nodes.
+// Each node just provides a reader function to get from the ResourceState to the
+// InstanceState, and this takes care of all the plumbing.
+func readInstanceFromState(
+ ctx EvalContext,
+ resourceName string,
+ output **InstanceState,
+ readerFn func(*ResourceState) (*InstanceState, error),
+) (*InstanceState, error) {
+ state, lock := ctx.State()
+
+ // Get a read lock so we can access this instance
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Look for the module state. If we don't have one, then it doesn't matter.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs := mod.Resources[resourceName]
+ if rs == nil {
+ return nil, nil
+ }
+
+ // Use the delegate function to get the instance state from the resource state
+ is, err := readerFn(rs)
+ if err != nil {
+ return nil, err
+ }
+
+ // Write the result to the output pointer
+ if output != nil {
+ *output = is
+ }
+
+ return is, nil
+}
+
+// EvalRequireState is an EvalNode implementation that early exits
+// if the state doesn't have an ID.
+type EvalRequireState struct {
+ State **InstanceState
+}
+
+func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) {
+ if n.State == nil {
+ return nil, EvalEarlyExitError{}
+ }
+
+ state := *n.State
+ if state == nil || state.ID == "" {
+ return nil, EvalEarlyExitError{}
+ }
+
+ return nil, nil
+}
+
+// EvalUpdateStateHook is an EvalNode implementation that calls the
+// PostStateUpdate hook with the current state.
+type EvalUpdateStateHook struct{}
+
+func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) {
+ state, lock := ctx.State()
+
+ // Get a full lock. Even calling something like WriteState can modify
+ // (prune) the state, so we need the full lock.
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Call the hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostStateUpdate(state)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+// EvalWriteState is an EvalNode implementation that writes the
+// primary InstanceState for a specific resource into the state.
+type EvalWriteState struct {
+ Name string
+ ResourceType string
+ Provider string
+ Dependencies []string
+ State **InstanceState
+}
+
+func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) {
+ return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies,
+ func(rs *ResourceState) error {
+ rs.Primary = *n.State
+ return nil
+ },
+ )
+}
+
+// EvalWriteStateDeposed is an EvalNode implementation that writes
+// an InstanceState out to the Deposed list of a resource in the state.
+type EvalWriteStateDeposed struct {
+ Name string
+ ResourceType string
+ Provider string
+ Dependencies []string
+ State **InstanceState
+ // Index indicates which instance in the Deposed list to target, or -1 to append.
+ Index int
+}
+
+func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
+ return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies,
+ func(rs *ResourceState) error {
+ if n.Index == -1 {
+ rs.Deposed = append(rs.Deposed, *n.State)
+ } else {
+ rs.Deposed[n.Index] = *n.State
+ }
+ return nil
+ },
+ )
+}
+
+// Pulls together the common tasks of the EvalWriteState nodes. All the args
+// are passed directly down from the EvalNode along with a `writer` function
+// which is handed the *ResourceState and is responsible for writing an
+// InstanceState to the proper field in the ResourceState.
+func writeInstanceToState(
+ ctx EvalContext,
+ resourceName string,
+ resourceType string,
+ provider string,
+ dependencies []string,
+ writerFn func(*ResourceState) error,
+) (*InstanceState, error) {
+ state, lock := ctx.State()
+ if state == nil {
+ return nil, fmt.Errorf("cannot write state to nil state")
+ }
+
+ // Get a write lock so we can access this instance
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, create it.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ mod = state.AddModule(ctx.Path())
+ }
+
+ // Look for the resource state.
+ rs := mod.Resources[resourceName]
+ if rs == nil {
+ rs = &ResourceState{}
+ rs.init()
+ mod.Resources[resourceName] = rs
+ }
+ rs.Type = resourceType
+ rs.Dependencies = dependencies
+ rs.Provider = provider
+
+ if err := writerFn(rs); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+// EvalClearPrimaryState is an EvalNode implementation that clears the primary
+// instance from a resource state.
+type EvalClearPrimaryState struct {
+ Name string
+}
+
+func (n *EvalClearPrimaryState) Eval(ctx EvalContext) (interface{}, error) {
+ state, lock := ctx.State()
+
+	// Get a write lock, since we're going to modify the resource state
+	lock.Lock()
+	defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, then it doesn't matter.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs := mod.Resources[n.Name]
+ if rs == nil {
+ return nil, nil
+ }
+
+ // Clear primary from the resource state
+ rs.Primary = nil
+
+ return nil, nil
+}
+
+// EvalDeposeState is an EvalNode implementation that takes the primary
+// out of a state and makes it Deposed. This is done at the beginning of
+// create-before-destroy so that the new instance can be created while
+// the old state of the to-be-destroyed resource is preserved.
+type EvalDeposeState struct {
+ Name string
+}
+
+// TODO: test
+func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) {
+ state, lock := ctx.State()
+
+	// Get a write lock, since we're going to modify the resource state
+	lock.Lock()
+	defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, then it doesn't matter.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs := mod.Resources[n.Name]
+ if rs == nil {
+ return nil, nil
+ }
+
+ // If we don't have a primary, we have nothing to depose
+ if rs.Primary == nil {
+ return nil, nil
+ }
+
+ // Depose
+ rs.Deposed = append(rs.Deposed, rs.Primary)
+ rs.Primary = nil
+
+ return nil, nil
+}
+
+// EvalUndeposeState is an EvalNode implementation that restores the most
+// recently deposed InstanceState as the primary, swapping in the given state.
+type EvalUndeposeState struct {
+ Name string
+ State **InstanceState
+}
+
+// TODO: test
+func (n *EvalUndeposeState) Eval(ctx EvalContext) (interface{}, error) {
+ state, lock := ctx.State()
+
+	// Get a write lock, since we're going to modify the resource state
+	lock.Lock()
+	defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, then it doesn't matter.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs := mod.Resources[n.Name]
+ if rs == nil {
+ return nil, nil
+ }
+
+	// If we don't have any deposed resources, then we don't have anything to do
+ if len(rs.Deposed) == 0 {
+ return nil, nil
+ }
+
+ // Undepose
+ idx := len(rs.Deposed) - 1
+ rs.Primary = rs.Deposed[idx]
+ rs.Deposed[idx] = *n.State
+
+ return nil, nil
+}
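
Depose and undepose are simple slice bookkeeping: the primary instance is pushed onto the Deposed list before a create-before-destroy replacement, and restored if the replacement has to be rolled back. A simplified sketch (here undepose truncates the list, where EvalUndeposeState instead swaps in a caller-supplied state):

package main

import "fmt"

type instance struct{ ID string }

type resource struct {
	Primary *instance
	Deposed []*instance
}

// depose pushes the primary onto the deposed list ahead of a
// create-before-destroy replacement.
func (r *resource) depose() {
	if r.Primary == nil {
		return
	}
	r.Deposed = append(r.Deposed, r.Primary)
	r.Primary = nil
}

// undepose restores the most recently deposed instance as primary.
func (r *resource) undepose() {
	if len(r.Deposed) == 0 {
		return
	}
	idx := len(r.Deposed) - 1
	r.Primary = r.Deposed[idx]
	r.Deposed = r.Deposed[:idx]
}

func main() {
	r := &resource{Primary: &instance{ID: "old"}}
	r.depose()
	fmt.Println(r.Primary == nil, len(r.Deposed)) // true 1
	r.undepose()
	fmt.Println(r.Primary.ID, len(r.Deposed)) // old 0
}
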
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
new file mode 100644
index 00000000..478aa640
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
@@ -0,0 +1,227 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/mitchellh/mapstructure"
+)
+
+// EvalValidateError is the error structure returned if there were
+// validation errors.
+type EvalValidateError struct {
+ Warnings []string
+ Errors []error
+}
+
+func (e *EvalValidateError) Error() string {
+ return fmt.Sprintf("Warnings: %s. Errors: %s", e.Warnings, e.Errors)
+}
+
+// EvalValidateCount is an EvalNode implementation that validates
+// the count of a resource.
+type EvalValidateCount struct {
+ Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) {
+ var count int
+ var errs []error
+ var err error
+ if _, err := ctx.Interpolate(n.Resource.RawCount, nil); err != nil {
+ errs = append(errs, fmt.Errorf(
+ "Failed to interpolate count: %s", err))
+ goto RETURN
+ }
+
+ count, err = n.Resource.Count()
+ if err != nil {
+ // If we can't get the count during validation, then
+ // just replace it with the number 1.
+ c := n.Resource.RawCount.Config()
+ c[n.Resource.RawCount.Key] = "1"
+ count = 1
+ }
+ err = nil
+
+ if count < 0 {
+ errs = append(errs, fmt.Errorf(
+ "Count is less than zero: %d", count))
+ }
+
+RETURN:
+ if len(errs) != 0 {
+ err = &EvalValidateError{
+ Errors: errs,
+ }
+ }
+ return nil, err
+}
+
+// EvalValidateProvider is an EvalNode implementation that validates
+// the configuration of a provider.
+type EvalValidateProvider struct {
+ Provider *ResourceProvider
+ Config **ResourceConfig
+}
+
+func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) {
+ provider := *n.Provider
+ config := *n.Config
+
+ warns, errs := provider.Validate(config)
+ if len(warns) == 0 && len(errs) == 0 {
+ return nil, nil
+ }
+
+ return nil, &EvalValidateError{
+ Warnings: warns,
+ Errors: errs,
+ }
+}
+
+// EvalValidateProvisioner is an EvalNode implementation that validates
+// the configuration of a provisioner and its connection settings.
+type EvalValidateProvisioner struct {
+ Provisioner *ResourceProvisioner
+ Config **ResourceConfig
+ ConnConfig **ResourceConfig
+}
+
+func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+ provisioner := *n.Provisioner
+ config := *n.Config
+ var warns []string
+ var errs []error
+
+ {
+ // Validate the provisioner's own config first
+ w, e := provisioner.Validate(config)
+ warns = append(warns, w...)
+ errs = append(errs, e...)
+ }
+
+ {
+ // Now validate the connection config, which might either be from
+ // the provisioner block itself or inherited from the resource's
+ // shared connection info.
+ w, e := n.validateConnConfig(*n.ConnConfig)
+ warns = append(warns, w...)
+ errs = append(errs, e...)
+ }
+
+ if len(warns) == 0 && len(errs) == 0 {
+ return nil, nil
+ }
+
+ return nil, &EvalValidateError{
+ Warnings: warns,
+ Errors: errs,
+ }
+}
+
+func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) (warns []string, errs []error) {
+ // We can't comprehensively validate the connection config since its
+ // final structure is decided by the communicator and we can't instantiate
+ // that until we have a complete instance state. However, we *can* catch
+ // configuration keys that are not valid for *any* communicator, catching
+ // typos early rather than waiting until we actually try to run one of
+ // the resource's provisioners.
+
+ type connConfigSuperset struct {
+ // All attribute types are interface{} here because at this point we
+ // may still have unresolved interpolation expressions, which will
+ // appear as strings regardless of the final goal type.
+
+ Type interface{} `mapstructure:"type"`
+ User interface{} `mapstructure:"user"`
+ Password interface{} `mapstructure:"password"`
+ Host interface{} `mapstructure:"host"`
+ Port interface{} `mapstructure:"port"`
+ Timeout interface{} `mapstructure:"timeout"`
+ ScriptPath interface{} `mapstructure:"script_path"`
+
+ // For type=ssh only (enforced in ssh communicator)
+ PrivateKey interface{} `mapstructure:"private_key"`
+ Agent interface{} `mapstructure:"agent"`
+ BastionHost interface{} `mapstructure:"bastion_host"`
+ BastionPort interface{} `mapstructure:"bastion_port"`
+ BastionUser interface{} `mapstructure:"bastion_user"`
+ BastionPassword interface{} `mapstructure:"bastion_password"`
+ BastionPrivateKey interface{} `mapstructure:"bastion_private_key"`
+
+ // For type=winrm only (enforced in winrm communicator)
+ HTTPS interface{} `mapstructure:"https"`
+ Insecure interface{} `mapstructure:"insecure"`
+ CACert interface{} `mapstructure:"cacert"`
+ }
+
+ var metadata mapstructure.Metadata
+ decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
+ Metadata: &metadata,
+ Result: &connConfigSuperset{}, // result is disregarded; we only care about unused keys
+ })
+ if err != nil {
+ // should never happen
+ errs = append(errs, err)
+ return
+ }
+
+ if err := decoder.Decode(connConfig.Config); err != nil {
+ errs = append(errs, err)
+ return
+ }
+
+ for _, attrName := range metadata.Unused {
+ errs = append(errs, fmt.Errorf("unknown 'connection' argument %q", attrName))
+ }
+ return
+}
+
+// EvalValidateResource is an EvalNode implementation that validates
+// the configuration of a resource.
+type EvalValidateResource struct {
+ Provider *ResourceProvider
+ Config **ResourceConfig
+ ResourceName string
+ ResourceType string
+ ResourceMode config.ResourceMode
+
+ // IgnoreWarnings means that warnings will not be passed through. This allows
+ // "just-in-time" passes of validation to continue execution through warnings.
+ IgnoreWarnings bool
+}
+
+func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) {
+ provider := *n.Provider
+ cfg := *n.Config
+ var warns []string
+ var errs []error
+ // Provider entry point varies depending on resource mode, because
+ // managed resources and data resources are two distinct concepts
+ // in the provider abstraction.
+ switch n.ResourceMode {
+ case config.ManagedResourceMode:
+ warns, errs = provider.ValidateResource(n.ResourceType, cfg)
+ case config.DataResourceMode:
+ warns, errs = provider.ValidateDataSource(n.ResourceType, cfg)
+ }
+
+ // If the resource name doesn't match the name regular
+ // expression, show an error.
+ if !config.NameRegexp.Match([]byte(n.ResourceName)) {
+ errs = append(errs, fmt.Errorf(
+ "%s: resource name can only contain letters, numbers, "+
+ "dashes, and underscores.", n.ResourceName))
+ }
+
+ if (len(warns) == 0 || n.IgnoreWarnings) && len(errs) == 0 {
+ return nil, nil
+ }
+
+ return nil, &EvalValidateError{
+ Warnings: warns,
+ Errors: errs,
+ }
+}
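
The connection-config check above leans on a mapstructure feature: decode into a superset struct, record metadata, and treat any key left in Metadata.Unused as a probable typo. The same technique in isolation, using the real github.com/mitchellh/mapstructure API with a made-up key set:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// knownKeys is a hypothetical superset of accepted arguments.
type knownKeys struct {
	User interface{} `mapstructure:"user"`
	Host interface{} `mapstructure:"host"`
}

func main() {
	input := map[string]interface{}{
		"user": "admin",
		"hsot": "example.com", // typo we want to catch early
	}

	var md mapstructure.Metadata
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Metadata: &md,
		Result:   &knownKeys{}, // result is discarded; only Unused matters
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}

	for _, k := range md.Unused {
		fmt.Printf("unknown argument %q\n", k) // unknown argument "hsot"
	}
}
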
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
new file mode 100644
index 00000000..ae4436a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
@@ -0,0 +1,74 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalValidateResourceSelfRef is an EvalNode implementation that validates that
+// a configuration doesn't contain a reference to the resource itself.
+//
+// This must be done prior to interpolating configuration in order to avoid
+// any infinite loop scenarios.
+type EvalValidateResourceSelfRef struct {
+ Addr **ResourceAddress
+ Config **config.RawConfig
+}
+
+func (n *EvalValidateResourceSelfRef) Eval(ctx EvalContext) (interface{}, error) {
+ addr := *n.Addr
+ conf := *n.Config
+
+ // Go through the variables and find self references
+ var errs []error
+ for k, raw := range conf.Variables {
+ rv, ok := raw.(*config.ResourceVariable)
+ if !ok {
+ continue
+ }
+
+ // Build an address from the variable
+ varAddr := &ResourceAddress{
+ Path: addr.Path,
+ Mode: rv.Mode,
+ Type: rv.Type,
+ Name: rv.Name,
+ Index: rv.Index,
+ InstanceType: TypePrimary,
+ }
+
+ // If the variable access is a multi-access (*), then we just
+ // match the index so that we'll match our own addr if everything
+ // else matches.
+ if rv.Multi && rv.Index == -1 {
+ varAddr.Index = addr.Index
+ }
+
+		// ResourceAddress uses index "-1" when no index is set at all,
+		// which implies index "0" for resource access. If we're in that
+		// scenario, set our varAddr index to -1 as well so that it
+		// matches.
+ if addr.Index == -1 && varAddr.Index == 0 {
+ varAddr.Index = -1
+ }
+
+ // If the addresses match, then this is a self reference
+ if varAddr.Equals(addr) && varAddr.Index == addr.Index {
+ errs = append(errs, fmt.Errorf(
+ "%s: self reference not allowed: %q",
+ addr, k))
+ }
+ }
+
+ // If no errors, no errors!
+ if len(errs) == 0 {
+ return nil, nil
+ }
+
+ // Wrap the errors in the proper wrapper so we can handle validation
+ // formatting properly upstream.
+ return nil, &EvalValidateError{
+ Errors: errs,
+ }
+}
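
The essence of the self-reference check is normalizing the referenced address so that splat (*) and unset indexes compare equal to the resource's own address. A stripped-down sketch with a hypothetical address type:

package main

import "fmt"

type addr struct {
	Type  string
	Name  string
	Index int // -1 means no index set
}

func (a addr) equals(b addr) bool {
	return a.Type == b.Type && a.Name == b.Name && a.Index == b.Index
}

func main() {
	self := addr{"aws_instance", "web", 0}
	ref := addr{"aws_instance", "web", -1} // splat-style reference

	// Normalize as above: a multi-access reference adopts our own index
	// so that it matches if everything else matches.
	if ref.Index == -1 {
		ref.Index = self.Index
	}

	if ref.equals(self) {
		fmt.Println("self reference not allowed")
	}
}
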
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
new file mode 100644
index 00000000..e39a33c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
@@ -0,0 +1,279 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/helper/hilmapstructure"
+)
+
+// EvalTypeCheckVariable is an EvalNode which ensures that the variable
+// values which are assigned as inputs to a module (including the root)
+// match the types which are either declared for the variables explicitly
+// or inferred from the default values.
+//
+// In order to achieve this three things are required:
+// - a map of the proposed variable values
+// - the configuration tree of the module in which the variable is
+// declared
+// - the path to the module (so we know which part of the tree to
+// compare the values against).
+type EvalTypeCheckVariable struct {
+ Variables map[string]interface{}
+ ModulePath []string
+ ModuleTree *module.Tree
+}
+
+func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) {
+ currentTree := n.ModuleTree
+ for _, pathComponent := range n.ModulePath[1:] {
+ currentTree = currentTree.Children()[pathComponent]
+ }
+ targetConfig := currentTree.Config()
+
+ prototypes := make(map[string]config.VariableType)
+ for _, variable := range targetConfig.Variables {
+ prototypes[variable.Name] = variable.Type()
+ }
+
+ // Only display a module in an error message if we are not in the root module
+ modulePathDescription := fmt.Sprintf(" in module %s", strings.Join(n.ModulePath[1:], "."))
+ if len(n.ModulePath) == 1 {
+ modulePathDescription = ""
+ }
+
+ for name, declaredType := range prototypes {
+ proposedValue, ok := n.Variables[name]
+ if !ok {
+ // This means the default value should be used as no overriding value
+ // has been set. Therefore we should continue as no check is necessary.
+ continue
+ }
+
+ if proposedValue == config.UnknownVariableValue {
+ continue
+ }
+
+ switch declaredType {
+ case config.VariableTypeString:
+ switch proposedValue.(type) {
+ case string:
+ continue
+ default:
+ return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
+ name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
+ }
+ case config.VariableTypeMap:
+ switch proposedValue.(type) {
+ case map[string]interface{}:
+ continue
+ default:
+ return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
+ name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
+ }
+ case config.VariableTypeList:
+ switch proposedValue.(type) {
+ case []interface{}:
+ continue
+ default:
+ return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
+ name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
+ }
+ default:
+ return nil, fmt.Errorf("variable %s%s should be type %s, got type string",
+ name, modulePathDescription, declaredType.Printable())
+ }
+ }
+
+ return nil, nil
+}
+
+// EvalSetVariables is an EvalNode implementation that sets the variables
+// explicitly for interpolation later.
+type EvalSetVariables struct {
+ Module *string
+ Variables map[string]interface{}
+}
+
+// TODO: test
+func (n *EvalSetVariables) Eval(ctx EvalContext) (interface{}, error) {
+ ctx.SetVariables(*n.Module, n.Variables)
+ return nil, nil
+}
+
+// EvalVariableBlock is an EvalNode implementation that evaluates the
+// given configuration, and uses the final values as a way to set the
+// mapping.
+type EvalVariableBlock struct {
+ Config **ResourceConfig
+ VariableValues map[string]interface{}
+}
+
+func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) {
+ // Clear out the existing mapping
+	for k := range n.VariableValues {
+ delete(n.VariableValues, k)
+ }
+
+ // Get our configuration
+ rc := *n.Config
+ for k, v := range rc.Config {
+ vKind := reflect.ValueOf(v).Type().Kind()
+
+ switch vKind {
+ case reflect.Slice:
+ var vSlice []interface{}
+ if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil {
+ n.VariableValues[k] = vSlice
+ continue
+ }
+ case reflect.Map:
+ var vMap map[string]interface{}
+ if err := hilmapstructure.WeakDecode(v, &vMap); err == nil {
+ n.VariableValues[k] = vMap
+ continue
+ }
+ default:
+ var vString string
+ if err := hilmapstructure.WeakDecode(v, &vString); err == nil {
+ n.VariableValues[k] = vString
+ continue
+ }
+ }
+
+ return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k)
+ }
+
+ for _, path := range rc.ComputedKeys {
+ log.Printf("[DEBUG] Setting Unknown Variable Value for computed key: %s", path)
+ err := n.setUnknownVariableValueForPath(path)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error {
+ pathComponents := strings.Split(path, ".")
+
+ if len(pathComponents) < 1 {
+		return fmt.Errorf("No path components in %s", path)
+ }
+
+ if len(pathComponents) == 1 {
+ // Special case the "top level" since we know the type
+ if _, ok := n.VariableValues[pathComponents[0]]; !ok {
+ n.VariableValues[pathComponents[0]] = config.UnknownVariableValue
+ }
+ return nil
+ }
+
+ // Otherwise find the correct point in the tree and then set to unknown
+ var current interface{} = n.VariableValues[pathComponents[0]]
+ for i := 1; i < len(pathComponents); i++ {
+ switch tCurrent := current.(type) {
+ case []interface{}:
+ index, err := strconv.Atoi(pathComponents[i])
+ if err != nil {
+ return fmt.Errorf("Cannot convert %s to slice index in path %s",
+ pathComponents[i], path)
+ }
+ current = tCurrent[index]
+ case []map[string]interface{}:
+ index, err := strconv.Atoi(pathComponents[i])
+ if err != nil {
+ return fmt.Errorf("Cannot convert %s to slice index in path %s",
+ pathComponents[i], path)
+ }
+ current = tCurrent[index]
+ case map[string]interface{}:
+ if val, hasVal := tCurrent[pathComponents[i]]; hasVal {
+ current = val
+ continue
+ }
+
+ tCurrent[pathComponents[i]] = config.UnknownVariableValue
+ break
+ }
+ }
+
+ return nil
+}
+
+// EvalCoerceMapVariable is an EvalNode implementation that recognizes a
+// specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a
+// bare map literal is indistinguishable from a list of maps w/ one element.
+//
+// We take all the same inputs as EvalTypeCheckVariable above, since we need
+// both the target type and the proposed value in order to properly coerce.
+type EvalCoerceMapVariable struct {
+ Variables map[string]interface{}
+ ModulePath []string
+ ModuleTree *module.Tree
+}
+
+// Eval implements the EvalNode interface. See EvalCoerceMapVariable for
+// details.
+func (n *EvalCoerceMapVariable) Eval(ctx EvalContext) (interface{}, error) {
+ currentTree := n.ModuleTree
+ for _, pathComponent := range n.ModulePath[1:] {
+ currentTree = currentTree.Children()[pathComponent]
+ }
+ targetConfig := currentTree.Config()
+
+ prototypes := make(map[string]config.VariableType)
+ for _, variable := range targetConfig.Variables {
+ prototypes[variable.Name] = variable.Type()
+ }
+
+ for name, declaredType := range prototypes {
+ if declaredType != config.VariableTypeMap {
+ continue
+ }
+
+ proposedValue, ok := n.Variables[name]
+ if !ok {
+ continue
+ }
+
+ if list, ok := proposedValue.([]interface{}); ok && len(list) == 1 {
+ if m, ok := list[0].(map[string]interface{}); ok {
+ log.Printf("[DEBUG] EvalCoerceMapVariable: "+
+ "Coercing single element list into map: %#v", m)
+ n.Variables[name] = m
+ }
+ }
+ }
+
+ return nil, nil
+}
+
+// hclTypeName returns the name of the type that would represent this value in
+// a config file, or falls back to the Go type name if there's no corresponding
+// HCL type. This is used for formatted output, not for comparing types.
+func hclTypeName(i interface{}) string {
+ switch k := reflect.Indirect(reflect.ValueOf(i)).Kind(); k {
+ case reflect.Bool:
+ return "boolean"
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+ reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
+ return "number"
+ case reflect.Array, reflect.Slice:
+ return "list"
+ case reflect.Map:
+ return "map"
+ case reflect.String:
+ return "string"
+ default:
+ // fall back to the Go type if there's no match
+ return k.String()
+ }
+}
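
hclTypeName is just a reflect.Kind switch; a quick usage sketch of the same technique, condensed to the kinds exercised here:

package main

import (
	"fmt"
	"reflect"
)

// typeName maps a Go value to the name a config author would use for it.
func typeName(i interface{}) string {
	switch k := reflect.Indirect(reflect.ValueOf(i)).Kind(); k {
	case reflect.Bool:
		return "boolean"
	case reflect.Int, reflect.Float64:
		return "number"
	case reflect.Array, reflect.Slice:
		return "list"
	case reflect.Map:
		return "map"
	case reflect.String:
		return "string"
	default:
		return k.String() // fall back to the Go kind name
	}
}

func main() {
	for _, v := range []interface{}{true, 42, []interface{}{}, map[string]interface{}{}, "hi"} {
		fmt.Println(typeName(v)) // boolean, number, list, map, string
	}
}
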
diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
new file mode 100644
index 00000000..00392efe
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
@@ -0,0 +1,119 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config"
+)
+
+// ProviderEvalTree returns the evaluation tree for initializing and
+// configuring providers.
+func ProviderEvalTree(n string, config *config.RawConfig) EvalNode {
+ var provider ResourceProvider
+ var resourceConfig *ResourceConfig
+
+ seq := make([]EvalNode, 0, 5)
+ seq = append(seq, &EvalInitProvider{Name: n})
+
+ // Input stuff
+ seq = append(seq, &EvalOpFilter{
+ Ops: []walkOperation{walkInput, walkImport},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n,
+ Output: &provider,
+ },
+ &EvalInterpolate{
+ Config: config,
+ Output: &resourceConfig,
+ },
+ &EvalBuildProviderConfig{
+ Provider: n,
+ Config: &resourceConfig,
+ Output: &resourceConfig,
+ },
+ &EvalInputProvider{
+ Name: n,
+ Provider: &provider,
+ Config: &resourceConfig,
+ },
+ },
+ },
+ })
+
+ seq = append(seq, &EvalOpFilter{
+ Ops: []walkOperation{walkValidate},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n,
+ Output: &provider,
+ },
+ &EvalInterpolate{
+ Config: config,
+ Output: &resourceConfig,
+ },
+ &EvalBuildProviderConfig{
+ Provider: n,
+ Config: &resourceConfig,
+ Output: &resourceConfig,
+ },
+ &EvalValidateProvider{
+ Provider: &provider,
+ Config: &resourceConfig,
+ },
+ &EvalSetProviderConfig{
+ Provider: n,
+ Config: &resourceConfig,
+ },
+ },
+ },
+ })
+
+ // Apply stuff
+ seq = append(seq, &EvalOpFilter{
+ Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n,
+ Output: &provider,
+ },
+ &EvalInterpolate{
+ Config: config,
+ Output: &resourceConfig,
+ },
+ &EvalBuildProviderConfig{
+ Provider: n,
+ Config: &resourceConfig,
+ Output: &resourceConfig,
+ },
+ &EvalSetProviderConfig{
+ Provider: n,
+ Config: &resourceConfig,
+ },
+ },
+ },
+ })
+
+ // We configure on everything but validate, since validate may
+ // not have access to all the variables.
+ seq = append(seq, &EvalOpFilter{
+ Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalConfigProvider{
+ Provider: n,
+ Config: &resourceConfig,
+ },
+ },
+ },
+ })
+
+ return &EvalSequence{Nodes: seq}
+}
+
+// CloseProviderEvalTree returns the evaluation tree for closing
+// provider connections that aren't needed anymore.
+func CloseProviderEvalTree(n string) EvalNode {
+ return &EvalCloseProvider{Name: n}
+}
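
ProviderEvalTree builds one sequence but gates each subtree by walk operation, so a single tree serves input, validate, and apply-style walks. A sketch of that op-filter gating with hypothetical types:

package main

import "fmt"

type walkOp int

const (
	walkValidate walkOp = iota
	walkPlan
	walkApply
)

type step struct {
	ops []walkOp // operations this step is enabled for
	run func(walkOp)
}

// applies reports whether the step is enabled for the given operation,
// mirroring how EvalOpFilter gates its subtree by walkOperation.
func (s step) applies(op walkOp) bool {
	for _, o := range s.ops {
		if o == op {
			return true
		}
	}
	return false
}

func main() {
	steps := []step{
		{ops: []walkOp{walkValidate}, run: func(walkOp) { fmt.Println("validate provider config") }},
		{ops: []walkOp{walkPlan, walkApply}, run: func(walkOp) { fmt.Println("configure provider") }},
	}

	op := walkPlan
	for _, s := range steps {
		if s.applies(op) {
			s.run(op) // prints only "configure provider"
		}
	}
}
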
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go
new file mode 100644
index 00000000..48ce6a33
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph.go
@@ -0,0 +1,172 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "runtime/debug"
+ "strings"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// RootModuleName is the name given to the root module implicitly.
+const RootModuleName = "root"
+
+// RootModulePath is the path for the root module.
+var RootModulePath = []string{RootModuleName}
+
+// Graph represents the graph that Terraform uses to represent resources
+// and their dependencies.
+type Graph struct {
+ // Graph is the actual DAG. This is embedded so you can call the DAG
+ // methods directly.
+ dag.AcyclicGraph
+
+ // Path is the path in the module tree that this Graph represents.
+ // The root is represented by a single element list containing
+ // RootModuleName
+ Path []string
+
+	// debugName is a name for reference in the debug output. This usually
+	// indicates what the topmost builder was, and whether this graph is a
+	// shadow or not.
+ debugName string
+}
+
+func (g *Graph) DirectedGraph() dag.Grapher {
+ return &g.AcyclicGraph
+}
+
+// Walk walks the graph with the given walker for callbacks. The graph
+// will be walked with full parallelism, so the walker should expect
+// to be called concurrently.
+func (g *Graph) Walk(walker GraphWalker) error {
+ return g.walk(walker)
+}
+
+func (g *Graph) walk(walker GraphWalker) error {
+ // The callbacks for enter/exiting a graph
+ ctx := walker.EnterPath(g.Path)
+ defer walker.ExitPath(g.Path)
+
+ // Get the path for logs
+ path := strings.Join(ctx.Path(), ".")
+
+ // Determine if our walker is a panic wrapper
+ panicwrap, ok := walker.(GraphWalkerPanicwrapper)
+ if !ok {
+ panicwrap = nil // just to be sure
+ }
+
+ debugName := "walk-graph.json"
+ if g.debugName != "" {
+ debugName = g.debugName + "-" + debugName
+ }
+
+ debugBuf := dbug.NewFileWriter(debugName)
+ g.SetDebugWriter(debugBuf)
+ defer debugBuf.Close()
+
+ // Walk the graph.
+ var walkFn dag.WalkFunc
+ walkFn = func(v dag.Vertex) (rerr error) {
+ log.Printf("[DEBUG] vertex '%s.%s': walking", path, dag.VertexName(v))
+ g.DebugVisitInfo(v, g.debugName)
+
+ // If we have a panic wrap GraphWalker and a panic occurs, recover
+ // and call that. We ensure the return value is an error, however,
+ // so that future nodes are not called.
+ defer func() {
+ // If no panicwrap, do nothing
+ if panicwrap == nil {
+ return
+ }
+
+ // If no panic, do nothing
+ err := recover()
+ if err == nil {
+ return
+ }
+
+ // Modify the return value to show the error
+ rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s",
+ dag.VertexName(v), err, debug.Stack())
+
+ // Call the panic wrapper
+ panicwrap.Panic(v, err)
+ }()
+
+ walker.EnterVertex(v)
+ defer walker.ExitVertex(v, rerr)
+
+ // vertexCtx is the context that we use when evaluating. This
+ // is normally the context of our graph but can be overridden
+ // with a GraphNodeSubPath impl.
+ vertexCtx := ctx
+ if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 {
+ vertexCtx = walker.EnterPath(normalizeModulePath(pn.Path()))
+ defer walker.ExitPath(pn.Path())
+ }
+
+ // If the node is eval-able, then evaluate it.
+ if ev, ok := v.(GraphNodeEvalable); ok {
+ tree := ev.EvalTree()
+ if tree == nil {
+ panic(fmt.Sprintf(
+ "%s.%s (%T): nil eval tree", path, dag.VertexName(v), v))
+ }
+
+ // Allow the walker to change our tree if needed. Eval,
+ // then callback with the output.
+ log.Printf("[DEBUG] vertex '%s.%s': evaluating", path, dag.VertexName(v))
+
+ g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path))
+
+ tree = walker.EnterEvalTree(v, tree)
+ output, err := Eval(tree, vertexCtx)
+ if rerr = walker.ExitEvalTree(v, output, err); rerr != nil {
+ return
+ }
+ }
+
+ // If the node is dynamically expanded, then expand it
+ if ev, ok := v.(GraphNodeDynamicExpandable); ok {
+ log.Printf(
+ "[DEBUG] vertex '%s.%s': expanding/walking dynamic subgraph",
+ path,
+ dag.VertexName(v))
+
+ g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path))
+
+ g, err := ev.DynamicExpand(vertexCtx)
+ if err != nil {
+ rerr = err
+ return
+ }
+ if g != nil {
+ // Walk the subgraph
+ if rerr = g.walk(walker); rerr != nil {
+ return
+ }
+ }
+ }
+
+ // If the node has a subgraph, then walk the subgraph
+ if sn, ok := v.(GraphNodeSubgraph); ok {
+ log.Printf(
+ "[DEBUG] vertex '%s.%s': walking subgraph",
+ path,
+ dag.VertexName(v))
+
+ g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path))
+
+ if rerr = sn.Subgraph().(*Graph).walk(walker); rerr != nil {
+ return
+ }
+ }
+
+ return nil
+ }
+
+ return g.AcyclicGraph.Walk(walkFn)
+}
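
The deferred recover in walkFn relies on a named return value: a panic in one vertex is converted into an ordinary error so later vertices are skipped rather than the whole process dying. The pattern in isolation:

package main

import (
	"fmt"
	"runtime/debug"
)

// visit converts a panic inside f into an ordinary error via the
// named return value, as the graph walk's deferred recover does.
func visit(f func()) (rerr error) {
	defer func() {
		if p := recover(); p != nil {
			rerr = fmt.Errorf("captured panic: %v\n\n%s", p, debug.Stack())
		}
	}()
	f()
	return nil
}

func main() {
	err := visit(func() { panic("boom") })
	fmt.Println(err != nil) // true: the panic became an error
}
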
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
new file mode 100644
index 00000000..6374bb90
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
@@ -0,0 +1,77 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// GraphBuilder is an interface that can be implemented and used with
+// Terraform to build the graph that Terraform walks.
+type GraphBuilder interface {
+ // Build builds the graph for the given module path. It is up to
+ // the interface implementation whether this build should expand
+ // the graph or not.
+ Build(path []string) (*Graph, error)
+}
+
+// BasicGraphBuilder is a GraphBuilder that builds a graph out of a
+// series of transforms and (optionally) validates the graph is a valid
+// structure.
+type BasicGraphBuilder struct {
+ Steps []GraphTransformer
+ Validate bool
+ // Optional name to add to the graph debug log
+ Name string
+}
+
+func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) {
+ g := &Graph{Path: path}
+
+ debugName := "graph.json"
+ if b.Name != "" {
+ debugName = b.Name + "-" + debugName
+ }
+ debugBuf := dbug.NewFileWriter(debugName)
+ g.SetDebugWriter(debugBuf)
+ defer debugBuf.Close()
+
+ for _, step := range b.Steps {
+ if step == nil {
+ continue
+ }
+
+ stepName := fmt.Sprintf("%T", step)
+ dot := strings.LastIndex(stepName, ".")
+ if dot >= 0 {
+ stepName = stepName[dot+1:]
+ }
+
+ debugOp := g.DebugOperation(stepName, "")
+ err := step.Transform(g)
+
+ errMsg := ""
+ if err != nil {
+ errMsg = err.Error()
+ }
+ debugOp.End(errMsg)
+
+ log.Printf(
+ "[TRACE] Graph after step %T:\n\n%s",
+ step, g.StringWithNodeTypes())
+
+ if err != nil {
+ return g, err
+ }
+ }
+
+ // Validate the graph structure
+ if b.Validate {
+ if err := g.Validate(); err != nil {
+ log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String())
+ return nil, err
+ }
+ }
+
+ return g, nil
+}
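
All of the builders that follow reduce to this loop: a list of transformers applied in order to a mutable graph, stopping at the first error. A minimal sketch of that pipeline shape, with hypothetical transformer types:

package main

import (
	"fmt"
	"log"
)

type graph struct{ nodes []string }

// transformer mirrors GraphTransformer: each step mutates the graph in place.
type transformer interface {
	transform(*graph) error
}

type addNode struct{ name string }

func (t addNode) transform(g *graph) error {
	g.nodes = append(g.nodes, t.name)
	return nil
}

// build runs each step in order and stops at the first failure, logging the
// graph after every step, like BasicGraphBuilder.Build above.
func build(steps []transformer) (*graph, error) {
	g := &graph{}
	for _, step := range steps {
		if step == nil {
			continue
		}
		if err := step.transform(g); err != nil {
			return g, err
		}
		log.Printf("graph after %T: %v", step, g.nodes)
	}
	return g, nil
}

func main() {
	g, err := build([]transformer{addNode{"root"}, nil, addNode{"provider"}})
	fmt.Println(g.nodes, err)
}
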
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
new file mode 100644
index 00000000..38a90f27
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
@@ -0,0 +1,141 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ApplyGraphBuilder implements GraphBuilder and is responsible for building
+// a graph for applying a Terraform diff.
+//
+// Because the graph is built from the diff (vs. the config or state),
+// this helps ensure that the apply-time graph doesn't modify any resources
+// that aren't explicitly in the diff. There are other scenarios where apply
+// can deviate from the diff, so this is just one layer of protection.
+type ApplyGraphBuilder struct {
+ // Module is the root module for the graph to build.
+ Module *module.Tree
+
+ // Diff is the diff to apply.
+ Diff *Diff
+
+ // State is the current state
+ State *State
+
+ // Providers is the list of providers supported.
+ Providers []string
+
+ // Provisioners is the list of provisioners supported.
+ Provisioners []string
+
+ // Targets are resources to target. This is only required to make sure
+ // unnecessary outputs aren't included in the apply graph. The plan
+ // builder successfully handles targeting resources. In the future,
+ // outputs should go into the diff so that this is unnecessary.
+ Targets []string
+
+ // DisableReduce, if true, will not reduce the graph. Great for testing.
+ DisableReduce bool
+
+ // Destroy, if true, represents a pure destroy operation
+ Destroy bool
+
+ // Validate will do structural validation of the graph.
+ Validate bool
+}
+
+// See GraphBuilder
+func (b *ApplyGraphBuilder) Build(path []string) (*Graph, error) {
+ return (&BasicGraphBuilder{
+ Steps: b.Steps(),
+ Validate: b.Validate,
+ Name: "ApplyGraphBuilder",
+ }).Build(path)
+}
+
+// See GraphBuilder
+func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
+ // Custom factory for creating providers.
+ concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
+ return &NodeApplyableProvider{
+ NodeAbstractProvider: a,
+ }
+ }
+
+ concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+ return &NodeApplyableResource{
+ NodeAbstractResource: a,
+ }
+ }
+
+ steps := []GraphTransformer{
+ // Creates all the nodes represented in the diff.
+ &DiffTransformer{
+ Concrete: concreteResource,
+
+ Diff: b.Diff,
+ Module: b.Module,
+ State: b.State,
+ },
+
+ // Create orphan output nodes
+ &OrphanOutputTransformer{Module: b.Module, State: b.State},
+
+ // Attach the configuration to any resources
+ &AttachResourceConfigTransformer{Module: b.Module},
+
+ // Attach the state
+ &AttachStateTransformer{State: b.State},
+
+ // Create all the providers
+ &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
+ &ProviderTransformer{},
+ &DisableProviderTransformer{},
+ &ParentProviderTransformer{},
+ &AttachProviderConfigTransformer{Module: b.Module},
+
+ // Destruction ordering
+ &DestroyEdgeTransformer{Module: b.Module, State: b.State},
+ GraphTransformIf(
+ func() bool { return !b.Destroy },
+ &CBDEdgeTransformer{Module: b.Module, State: b.State},
+ ),
+
+ // Provisioner-related transformations
+ &MissingProvisionerTransformer{Provisioners: b.Provisioners},
+ &ProvisionerTransformer{},
+
+ // Add root variables
+ &RootVariableTransformer{Module: b.Module},
+
+ // Add the outputs
+ &OutputTransformer{Module: b.Module},
+
+ // Add module variables
+ &ModuleVariableTransformer{Module: b.Module},
+
+ // Connect references so ordering is correct
+ &ReferenceTransformer{},
+
+ // Add the node to fix the state count boundaries
+ &CountBoundaryTransformer{},
+
+ // Target
+ &TargetsTransformer{Targets: b.Targets},
+
+ // Close opened plugin connections
+ &CloseProviderTransformer{},
+ &CloseProvisionerTransformer{},
+
+ // Single root
+ &RootTransformer{},
+ }
+
+ if !b.DisableReduce {
+ // Perform the transitive reduction to make our graph a bit
+ // more sane if possible (it usually is possible).
+ steps = append(steps, &TransitiveReductionTransformer{})
+ }
+
+ return steps
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
new file mode 100644
index 00000000..014b348e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
@@ -0,0 +1,67 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for
+// planning a pure-destroy.
+//
+// Planning a pure destroy operation is simple because we can ignore most
+// ordering configuration and simply reverse the state.
+type DestroyPlanGraphBuilder struct {
+ // Module is the root module for the graph to build.
+ Module *module.Tree
+
+ // State is the current state
+ State *State
+
+ // Targets are resources to target
+ Targets []string
+
+ // Validate will do structural validation of the graph.
+ Validate bool
+}
+
+// See GraphBuilder
+func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) {
+ return (&BasicGraphBuilder{
+ Steps: b.Steps(),
+ Validate: b.Validate,
+ Name: "DestroyPlanGraphBuilder",
+ }).Build(path)
+}
+
+// See GraphBuilder
+func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
+ concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+ return &NodePlanDestroyableResource{
+ NodeAbstractResource: a,
+ }
+ }
+
+ steps := []GraphTransformer{
+ // Creates all the nodes represented in the state.
+ &StateTransformer{
+ Concrete: concreteResource,
+ State: b.State,
+ },
+
+ // Attach the configuration to any resources
+ &AttachResourceConfigTransformer{Module: b.Module},
+
+ // Destruction ordering. We require this only so that
+ // targeting below will prune the correct things.
+ &DestroyEdgeTransformer{Module: b.Module, State: b.State},
+
+ // Target. Note we don't set "Destroy: true" here since we already
+ // created proper destroy ordering.
+ &TargetsTransformer{Targets: b.Targets},
+
+ // Single root
+ &RootTransformer{},
+ }
+
+ return steps
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
new file mode 100644
index 00000000..7070c59e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
@@ -0,0 +1,76 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ImportGraphBuilder implements GraphBuilder and is responsible for building
+// a graph for importing resources into Terraform. This is a much, much
+// simpler graph than a normal configuration graph.
+type ImportGraphBuilder struct {
+ // ImportTargets are the list of resources to import.
+ ImportTargets []*ImportTarget
+
+ // Module is the module to add to the graph. See ImportOpts.Module.
+ Module *module.Tree
+
+ // Providers is the list of providers supported.
+ Providers []string
+}
+
+// Build builds the graph according to the steps returned by Steps.
+func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) {
+ return (&BasicGraphBuilder{
+ Steps: b.Steps(),
+ Validate: true,
+ Name: "ImportGraphBuilder",
+ }).Build(path)
+}
+
+// Steps returns the ordered list of GraphTransformers that must be executed
+// to build a complete graph.
+func (b *ImportGraphBuilder) Steps() []GraphTransformer {
+ // Get the module. If we don't have one, we just use an empty tree
+ // so that the transform still works but does nothing.
+ mod := b.Module
+ if mod == nil {
+ mod = module.NewEmptyTree()
+ }
+
+ // Custom factory for creating providers.
+ concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
+ return &NodeApplyableProvider{
+ NodeAbstractProvider: a,
+ }
+ }
+
+ steps := []GraphTransformer{
+ // Create all our resources from the configuration and state
+ &ConfigTransformer{Module: mod},
+
+ // Add the import steps
+ &ImportStateTransformer{Targets: b.ImportTargets},
+
+ // Provider-related transformations
+ &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
+ &ProviderTransformer{},
+ &DisableProviderTransformer{},
+ &ParentProviderTransformer{},
+ &AttachProviderConfigTransformer{Module: mod},
+
+ // This validates that the providers only depend on variables
+ &ImportProviderValidateTransformer{},
+
+ // Close opened plugin connections
+ &CloseProviderTransformer{},
+
+ // Single root
+ &RootTransformer{},
+
+ // Optimize
+ &TransitiveReductionTransformer{},
+ }
+
+ return steps
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
new file mode 100644
index 00000000..0df48cdb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
@@ -0,0 +1,27 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// InputGraphBuilder creates the graph for the input operation.
+//
+// Unlike other graph builders, this is a function since it currently modifies,
+// and is based on, the PlanGraphBuilder. The PlanGraphBuilder passed in will
+// be modified and should not be used for any other operations.
+func InputGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
+ // We're going to customize the concrete functions
+ p.CustomConcrete = true
+
+ // Set the provider to the normal provider. This will ask for input.
+ p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
+ return &NodeApplyableProvider{
+ NodeAbstractProvider: a,
+ }
+ }
+
+ // We purposely don't set any more concrete fields since the remainder
+ // should be no-ops.
+
+ return p
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
new file mode 100644
index 00000000..02d86970
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
@@ -0,0 +1,161 @@
+package terraform
+
+import (
+ "sync"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// PlanGraphBuilder implements GraphBuilder and is responsible for building
+// a graph for planning (creating a Terraform Diff).
+//
+// The primary difference between this graph and others:
+//
+// * Based on the config since it represents the target state
+//
+// * Ignores lifecycle options since no lifecycle events occur here. This
+// simplifies the graph significantly since complex transforms such as
+// create-before-destroy can be completely ignored.
+//
+type PlanGraphBuilder struct {
+ // Module is the root module for the graph to build.
+ Module *module.Tree
+
+ // State is the current state
+ State *State
+
+ // Providers is the list of providers supported.
+ Providers []string
+
+ // Provisioners is the list of provisioners supported.
+ Provisioners []string
+
+ // Targets are resources to target
+ Targets []string
+
+ // DisableReduce, if true, will not reduce the graph. Great for testing.
+ DisableReduce bool
+
+ // Validate will do structural validation of the graph.
+ Validate bool
+
+ // CustomConcrete can be set to customize the node types created
+ // for various parts of the plan. This is useful in order to customize
+ // the plan behavior.
+ CustomConcrete bool
+ ConcreteProvider ConcreteProviderNodeFunc
+ ConcreteResource ConcreteResourceNodeFunc
+ ConcreteResourceOrphan ConcreteResourceNodeFunc
+
+ once sync.Once
+}
+
+// See GraphBuilder
+func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) {
+ return (&BasicGraphBuilder{
+ Steps: b.Steps(),
+ Validate: b.Validate,
+ Name: "PlanGraphBuilder",
+ }).Build(path)
+}
+
+// See GraphBuilder
+func (b *PlanGraphBuilder) Steps() []GraphTransformer {
+ b.once.Do(b.init)
+
+ steps := []GraphTransformer{
+ // Creates all the resources represented in the config
+ &ConfigTransformer{
+ Concrete: b.ConcreteResource,
+ Module: b.Module,
+ },
+
+ // Add the outputs
+ &OutputTransformer{Module: b.Module},
+
+ // Add orphan resources
+ &OrphanResourceTransformer{
+ Concrete: b.ConcreteResourceOrphan,
+ State: b.State,
+ Module: b.Module,
+ },
+
+ // Attach the configuration to any resources
+ &AttachResourceConfigTransformer{Module: b.Module},
+
+ // Attach the state
+ &AttachStateTransformer{State: b.State},
+
+ // Add root variables
+ &RootVariableTransformer{Module: b.Module},
+
+ // Create all the providers
+ &MissingProviderTransformer{Providers: b.Providers, Concrete: b.ConcreteProvider},
+ &ProviderTransformer{},
+ &DisableProviderTransformer{},
+ &ParentProviderTransformer{},
+ &AttachProviderConfigTransformer{Module: b.Module},
+
+ // Provisioner-related transformations. Only add these if requested.
+ GraphTransformIf(
+ func() bool { return b.Provisioners != nil },
+ GraphTransformMulti(
+ &MissingProvisionerTransformer{Provisioners: b.Provisioners},
+ &ProvisionerTransformer{},
+ ),
+ ),
+
+ // Add module variables
+ &ModuleVariableTransformer{Module: b.Module},
+
+ // Connect so that the references are ready for targeting. We'll
+ // have to connect again later for providers and so on.
+ &ReferenceTransformer{},
+
+ // Target
+ &TargetsTransformer{Targets: b.Targets},
+
+ // Close opened plugin connections
+ &CloseProviderTransformer{},
+ &CloseProvisionerTransformer{},
+
+ // Single root
+ &RootTransformer{},
+ }
+
+ if !b.DisableReduce {
+ // Perform the transitive reduction to make our graph a bit
+ // more sane if possible (it usually is possible).
+ steps = append(steps, &TransitiveReductionTransformer{})
+ }
+
+ return steps
+}
+
+func (b *PlanGraphBuilder) init() {
+ // Do nothing if the user requests customizing the fields
+ if b.CustomConcrete {
+ return
+ }
+
+ b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
+ return &NodeApplyableProvider{
+ NodeAbstractProvider: a,
+ }
+ }
+
+ b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
+ return &NodePlannableResource{
+ NodeAbstractCountResource: &NodeAbstractCountResource{
+ NodeAbstractResource: a,
+ },
+ }
+ }
+
+ b.ConcreteResourceOrphan = func(a *NodeAbstractResource) dag.Vertex {
+ return &NodePlannableResourceOrphan{
+ NodeAbstractResource: a,
+ }
+ }
+}
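
The sync.Once around init is what lets callers either accept the default concrete factories or pre-set CustomConcrete and supply their own (as InputGraphBuilder does). A compact sketch of that lazily-installed-defaults pattern:

package main

import (
	"fmt"
	"sync"
)

type builder struct {
	custom  bool
	factory func() string
	once    sync.Once
}

// init installs default factories unless the caller opted out,
// mirroring PlanGraphBuilder.init.
func (b *builder) init() {
	if b.custom {
		return
	}
	b.factory = func() string { return "default-node" }
}

func (b *builder) steps() []string {
	b.once.Do(b.init) // runs at most once, however often steps is called
	return []string{b.factory()}
}

func main() {
	b := &builder{}
	fmt.Println(b.steps()) // [default-node]
	fmt.Println(b.steps()) // init is not re-run
}
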
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
new file mode 100644
index 00000000..88ae3380
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
@@ -0,0 +1,132 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// RefreshGraphBuilder implements GraphBuilder and is responsible for building
+// a graph for refreshing (updating the Terraform state).
+//
+// The primary difference between this graph and others:
+//
+// * Based on the state since it represents the only resources that
+// need to be refreshed.
+//
+// * Ignores lifecycle options since no lifecycle events occur here. This
+// simplifies the graph significantly since complex transforms such as
+// create-before-destroy can be completely ignored.
+//
+type RefreshGraphBuilder struct {
+ // Module is the root module for the graph to build.
+ Module *module.Tree
+
+ // State is the current state
+ State *State
+
+ // Providers is the list of providers supported.
+ Providers []string
+
+ // Targets are resources to target
+ Targets []string
+
+ // DisableReduce, if true, will not reduce the graph. Great for testing.
+ DisableReduce bool
+
+ // Validate will do structural validation of the graph.
+ Validate bool
+}
+
+// See GraphBuilder
+func (b *RefreshGraphBuilder) Build(path []string) (*Graph, error) {
+ return (&BasicGraphBuilder{
+ Steps: b.Steps(),
+ Validate: b.Validate,
+ Name: "RefreshGraphBuilder",
+ }).Build(path)
+}
+
+// See GraphBuilder
+func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
+ // Custom factory for creating providers.
+ concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
+ return &NodeApplyableProvider{
+ NodeAbstractProvider: a,
+ }
+ }
+
+ concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+ return &NodeRefreshableResource{
+ NodeAbstractResource: a,
+ }
+ }
+
+ concreteDataResource := func(a *NodeAbstractResource) dag.Vertex {
+ return &NodeRefreshableDataResource{
+ NodeAbstractCountResource: &NodeAbstractCountResource{
+ NodeAbstractResource: a,
+ },
+ }
+ }
+
+ steps := []GraphTransformer{
+ // Creates all the resources represented in the state
+ &StateTransformer{
+ Concrete: concreteResource,
+ State: b.State,
+ },
+
+ // Creates all the data resources that aren't in the state
+ &ConfigTransformer{
+ Concrete: concreteDataResource,
+ Module: b.Module,
+ Unique: true,
+ ModeFilter: true,
+ Mode: config.DataResourceMode,
+ },
+
+ // Attach the state
+ &AttachStateTransformer{State: b.State},
+
+ // Attach the configuration to any resources
+ &AttachResourceConfigTransformer{Module: b.Module},
+
+ // Add root variables
+ &RootVariableTransformer{Module: b.Module},
+
+ // Create all the providers
+ &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
+ &ProviderTransformer{},
+ &DisableProviderTransformer{},
+ &ParentProviderTransformer{},
+ &AttachProviderConfigTransformer{Module: b.Module},
+
+ // Add the outputs
+ &OutputTransformer{Module: b.Module},
+
+ // Add module variables
+ &ModuleVariableTransformer{Module: b.Module},
+
+ // Connect so that the references are ready for targeting. We'll
+ // have to connect again later for providers and so on.
+ &ReferenceTransformer{},
+
+ // Target
+ &TargetsTransformer{Targets: b.Targets},
+
+ // Close opened plugin connections
+ &CloseProviderTransformer{},
+
+ // Single root
+ &RootTransformer{},
+ }
+
+ if !b.DisableReduce {
+ // Perform the transitive reduction to make our graph a bit
+ // more sane if possible (it usually is possible).
+ steps = append(steps, &TransitiveReductionTransformer{})
+ }
+
+ return steps
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
new file mode 100644
index 00000000..645ec7be
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
@@ -0,0 +1,36 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ValidateGraphBuilder creates the graph for the validate operation.
+//
+// ValidateGraphBuilder is based on the PlanGraphBuilder. We do this so that
+// we only have to validate what we'd normally plan anyways. The
+// PlanGraphBuilder given will be modified so it shouldn't be used for anything
+// else after calling this function.
+func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
+ // We're going to customize the concrete functions
+ p.CustomConcrete = true
+
+ // Set the provider to the normal provider. This will ask for input.
+ p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
+ return &NodeApplyableProvider{
+ NodeAbstractProvider: a,
+ }
+ }
+
+ p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
+ return &NodeValidatableResource{
+ NodeAbstractCountResource: &NodeAbstractCountResource{
+ NodeAbstractResource: a,
+ },
+ }
+ }
+
+ // We purposely don't set any other concrete types since they don't
+ // require validation.
+
+ return p
+}
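+
+// Illustrative sketch (names assumed): since ValidateGraphBuilder mutates
+// and returns the given plan builder, a caller could write:
+//
+//     gb := ValidateGraphBuilder(&PlanGraphBuilder{Module: mod, State: state})
+//     g, err := gb.Build([]string{"root"})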
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
new file mode 100644
index 00000000..73e3821f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
@@ -0,0 +1,9 @@
+package terraform
+
+import "github.com/hashicorp/terraform/dag"
+
+// GraphDot returns the dot formatting of a visual representation of
+// the given Terraform graph.
+func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) {
+ return string(g.Dot(opts)), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
new file mode 100644
index 00000000..2897eb54
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
@@ -0,0 +1,7 @@
+package terraform
+
+// GraphNodeSubPath says that a node is part of a graph with a
+// different path, and the context should be adjusted accordingly.
+type GraphNodeSubPath interface {
+ Path() []string
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
new file mode 100644
index 00000000..34ce6f64
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
@@ -0,0 +1,60 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphWalker is an interface that can be implemented so that, when it is
+// used with Graph.Walk, the given callbacks are invoked on certain events.
+type GraphWalker interface {
+ EnterPath([]string) EvalContext
+ ExitPath([]string)
+ EnterVertex(dag.Vertex)
+ ExitVertex(dag.Vertex, error)
+ EnterEvalTree(dag.Vertex, EvalNode) EvalNode
+ ExitEvalTree(dag.Vertex, interface{}, error) error
+}
+
+// GraphWalkerPanicwrapper can be optionally implemented to catch panics
+// that occur while walking the graph. This is not generally recommended
+// since panics should crash Terraform and result in a bug report. However,
+// this is particularly useful for situations like the shadow graph where
+// you don't ever want to cause a panic.
+type GraphWalkerPanicwrapper interface {
+ GraphWalker
+
+	// Panic is called when a panic occurs. This will halt the panic from
+	// propagating, so if the walker still wants the crash it should panic
+	// again. This is called from within a defer so runtime/debug.Stack can
+ // be used to get the stack trace of the panic.
+ Panic(dag.Vertex, interface{})
+}
+
+// GraphWalkerPanicwrap wraps an existing GraphWalker to catch and swallow
+// any panics. The panics are not lost: they are still returned as errors
+// as part of a graph walk.
+func GraphWalkerPanicwrap(w GraphWalker) GraphWalkerPanicwrapper {
+ return &graphWalkerPanicwrapper{
+ GraphWalker: w,
+ }
+}
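+
+// For example (illustrative only), a test can swallow panics during a walk:
+//
+//     w := GraphWalkerPanicwrap(NullGraphWalker{})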
+
+type graphWalkerPanicwrapper struct {
+ GraphWalker
+}
+
+func (graphWalkerPanicwrapper) Panic(dag.Vertex, interface{}) {}
+
+// NullGraphWalker is a GraphWalker implementation that does nothing.
+// This can be embedded within other GraphWalker implementations for easily
+// implementing all the required functions.
+type NullGraphWalker struct{}
+
+func (NullGraphWalker) EnterPath([]string) EvalContext { return new(MockEvalContext) }
+func (NullGraphWalker) ExitPath([]string) {}
+func (NullGraphWalker) EnterVertex(dag.Vertex) {}
+func (NullGraphWalker) ExitVertex(dag.Vertex, error) {}
+func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n }
+func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) error {
+ return nil
+}
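+
+// Illustrative sketch (names assumed): a custom walker can embed
+// NullGraphWalker and override only the callbacks it cares about:
+//
+//     type countingWalker struct {
+//         NullGraphWalker
+//         visits int
+//     }
+//
+//     func (w *countingWalker) EnterVertex(dag.Vertex) { w.visits++ }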
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
new file mode 100644
index 00000000..e63b4603
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
@@ -0,0 +1,157 @@
+package terraform
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "sync"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ContextGraphWalker is the GraphWalker implementation used with the
+// Context struct to walk and evaluate the graph.
+type ContextGraphWalker struct {
+ NullGraphWalker
+
+ // Configurable values
+ Context *Context
+ Operation walkOperation
+ StopContext context.Context
+
+	// Outputs: do not set these, and do not read them while the graph
+	// is being walked.
+ ValidationWarnings []string
+ ValidationErrors []error
+
+ errorLock sync.Mutex
+ once sync.Once
+ contexts map[string]*BuiltinEvalContext
+ contextLock sync.Mutex
+ interpolaterVars map[string]map[string]interface{}
+ interpolaterVarLock sync.Mutex
+ providerCache map[string]ResourceProvider
+ providerConfigCache map[string]*ResourceConfig
+ providerLock sync.Mutex
+ provisionerCache map[string]ResourceProvisioner
+ provisionerLock sync.Mutex
+}
+
+func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
+ w.once.Do(w.init)
+
+ w.contextLock.Lock()
+ defer w.contextLock.Unlock()
+
+ // If we already have a context for this path cached, use that
+ key := PathCacheKey(path)
+ if ctx, ok := w.contexts[key]; ok {
+ return ctx
+ }
+
+ // Setup the variables for this interpolater
+ variables := make(map[string]interface{})
+ if len(path) <= 1 {
+ for k, v := range w.Context.variables {
+ variables[k] = v
+ }
+ }
+ w.interpolaterVarLock.Lock()
+ if m, ok := w.interpolaterVars[key]; ok {
+ for k, v := range m {
+ variables[k] = v
+ }
+ }
+ w.interpolaterVars[key] = variables
+ w.interpolaterVarLock.Unlock()
+
+ ctx := &BuiltinEvalContext{
+ StopContext: w.StopContext,
+ PathValue: path,
+ Hooks: w.Context.hooks,
+ InputValue: w.Context.uiInput,
+ Components: w.Context.components,
+ ProviderCache: w.providerCache,
+ ProviderConfigCache: w.providerConfigCache,
+ ProviderInputConfig: w.Context.providerInputConfig,
+ ProviderLock: &w.providerLock,
+ ProvisionerCache: w.provisionerCache,
+ ProvisionerLock: &w.provisionerLock,
+ DiffValue: w.Context.diff,
+ DiffLock: &w.Context.diffLock,
+ StateValue: w.Context.state,
+ StateLock: &w.Context.stateLock,
+ Interpolater: &Interpolater{
+ Operation: w.Operation,
+ Meta: w.Context.meta,
+ Module: w.Context.module,
+ State: w.Context.state,
+ StateLock: &w.Context.stateLock,
+ VariableValues: variables,
+ VariableValuesLock: &w.interpolaterVarLock,
+ },
+ InterpolaterVars: w.interpolaterVars,
+ InterpolaterVarLock: &w.interpolaterVarLock,
+ }
+
+ w.contexts[key] = ctx
+ return ctx
+}
+
+func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {
+ log.Printf("[TRACE] [%s] Entering eval tree: %s",
+ w.Operation, dag.VertexName(v))
+
+ // Acquire a lock on the semaphore
+ w.Context.parallelSem.Acquire()
+
+ // We want to filter the evaluation tree to only include operations
+ // that belong in this operation.
+ return EvalFilter(n, EvalNodeFilterOp(w.Operation))
+}
+
+func (w *ContextGraphWalker) ExitEvalTree(
+ v dag.Vertex, output interface{}, err error) error {
+ log.Printf("[TRACE] [%s] Exiting eval tree: %s",
+ w.Operation, dag.VertexName(v))
+
+ // Release the semaphore
+ w.Context.parallelSem.Release()
+
+ if err == nil {
+ return nil
+ }
+
+	// Acquire the lock since everything below requires it.
+ w.errorLock.Lock()
+ defer w.errorLock.Unlock()
+
+	// Try to get a validation error out of it. If it's not a validation
+ // error, then just record the normal error.
+ verr, ok := err.(*EvalValidateError)
+ if !ok {
+ return err
+ }
+
+ for _, msg := range verr.Warnings {
+ w.ValidationWarnings = append(
+ w.ValidationWarnings,
+ fmt.Sprintf("%s: %s", dag.VertexName(v), msg))
+ }
+ for _, e := range verr.Errors {
+ w.ValidationErrors = append(
+ w.ValidationErrors,
+ errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", dag.VertexName(v)), e))
+ }
+
+ return nil
+}
+
+func (w *ContextGraphWalker) init() {
+ w.contexts = make(map[string]*BuiltinEvalContext, 5)
+ w.providerCache = make(map[string]ResourceProvider, 5)
+ w.providerConfigCache = make(map[string]*ResourceConfig, 5)
+ w.provisionerCache = make(map[string]ResourceProvisioner, 5)
+ w.interpolaterVars = make(map[string]map[string]interface{}, 5)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
new file mode 100644
index 00000000..3fb37481
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
@@ -0,0 +1,18 @@
+package terraform
+
+//go:generate stringer -type=walkOperation graph_walk_operation.go
+
+// walkOperation is an enum which tells the walkContext what to do.
+type walkOperation byte
+
+const (
+ walkInvalid walkOperation = iota
+ walkInput
+ walkApply
+ walkPlan
+ walkPlanDestroy
+ walkRefresh
+ walkValidate
+ walkDestroy
+ walkImport
+)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
new file mode 100644
index 00000000..e97b4855
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=GraphType context_graph_type.go"; DO NOT EDIT.
+
+package terraform
+
+import "fmt"
+
+const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate"
+
+var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125}
+
+func (i GraphType) String() string {
+ if i >= GraphType(len(_GraphType_index)-1) {
+ return fmt.Sprintf("GraphType(%d)", i)
+ }
+ return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]]
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go
new file mode 100644
index 00000000..ab11e8ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook.go
@@ -0,0 +1,137 @@
+package terraform
+
+// HookAction is an enum of actions that can be taken as a result of a hook
+// callback. This allows you to modify the behavior of Terraform at runtime.
+type HookAction byte
+
+const (
+ // HookActionContinue continues with processing as usual.
+ HookActionContinue HookAction = iota
+
+ // HookActionHalt halts immediately: no more hooks are processed
+ // and the action that Terraform was about to take is cancelled.
+ HookActionHalt
+)
+
+// Hook is the interface that must be implemented to hook into various
+// parts of Terraform, allowing you to inspect or change behavior at runtime.
+//
+// There are MANY hook points into Terraform. If you only want to implement
+// some hook points, but not all (which is the likely case), then embed the
+// NilHook into your struct, which implements all of the interface but does
+// nothing. Then, override only the functions you want to implement.
+type Hook interface {
+ // PreApply and PostApply are called before and after a single
+ // resource is applied. The error argument in PostApply is the
+ // error, if any, that was returned from the provider Apply call itself.
+ PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error)
+ PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error)
+
+	// PreDiff and PostDiff are called before and after a single
+	// resource is diffed.
+ PreDiff(*InstanceInfo, *InstanceState) (HookAction, error)
+ PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error)
+
+ // Provisioning hooks
+ //
+ // All should be self-explanatory. ProvisionOutput is called with
+ // output sent back by the provisioners. This will be called multiple
+ // times as output comes in, but each call should represent a line of
+ // output. The ProvisionOutput method cannot control whether the
+ // hook continues running.
+ PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
+ PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
+ PreProvision(*InstanceInfo, string) (HookAction, error)
+ PostProvision(*InstanceInfo, string, error) (HookAction, error)
+ ProvisionOutput(*InstanceInfo, string, string)
+
+ // PreRefresh and PostRefresh are called before and after a single
+ // resource state is refreshed, respectively.
+ PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
+ PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
+
+ // PostStateUpdate is called after the state is updated.
+ PostStateUpdate(*State) (HookAction, error)
+
+	// PreImportState and PostImportState are called before and after
+	// a single resource's state is imported.
+ PreImportState(*InstanceInfo, string) (HookAction, error)
+ PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error)
+}
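+
+// Illustrative sketch (names assumed): a hook that only cares about one
+// event can embed NilHook (below) and override a single method:
+//
+//     type applyLogger struct{ NilHook }
+//
+//     func (h *applyLogger) PostApply(i *InstanceInfo, s *InstanceState, err error) (HookAction, error) {
+//         log.Printf("applied %s (err=%v)", i.Id, err)
+//         return HookActionContinue, nil
+//     }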
+
+// NilHook is a Hook implementation that does nothing. It exists only to
+// simplify implementing hooks. You can embed this into your Hook implementation
+// and only implement the functions you are interested in.
+type NilHook struct{}
+
+func (*NilHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PreProvision(*InstanceInfo, string) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) ProvisionOutput(
+ *InstanceInfo, string, string) {
+}
+
+func (*NilHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PreImportState(*InstanceInfo, string) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostStateUpdate(*State) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+// handleHook turns hook actions into panics. This lets you use Go's
+// panic/recover mechanism as flow control for hook actions.
+func handleHook(a HookAction, err error) {
+ if err != nil {
+ // TODO: handle errors
+ }
+
+ switch a {
+ case HookActionContinue:
+ return
+ case HookActionHalt:
+ panic(HookActionHalt)
+ }
+}
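+
+// Illustrative sketch (hook, info, state and diff assumed): a caller
+// pairs handleHook with recover so a halt unwinds cleanly:
+//
+//     defer func() {
+//         if r := recover(); r != nil && r != HookActionHalt {
+//             panic(r) // real panics still crash
+//         }
+//     }()
+//     handleHook(hook.PreApply(info, state, diff))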
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
new file mode 100644
index 00000000..0e464006
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
@@ -0,0 +1,245 @@
+package terraform
+
+import "sync"
+
+// MockHook is an implementation of Hook that can be used for tests.
+// It records all of its function calls.
+type MockHook struct {
+ sync.Mutex
+
+ PreApplyCalled bool
+ PreApplyInfo *InstanceInfo
+ PreApplyDiff *InstanceDiff
+ PreApplyState *InstanceState
+ PreApplyReturn HookAction
+ PreApplyError error
+
+ PostApplyCalled bool
+ PostApplyInfo *InstanceInfo
+ PostApplyState *InstanceState
+ PostApplyError error
+ PostApplyReturn HookAction
+ PostApplyReturnError error
+ PostApplyFn func(*InstanceInfo, *InstanceState, error) (HookAction, error)
+
+ PreDiffCalled bool
+ PreDiffInfo *InstanceInfo
+ PreDiffState *InstanceState
+ PreDiffReturn HookAction
+ PreDiffError error
+
+ PostDiffCalled bool
+ PostDiffInfo *InstanceInfo
+ PostDiffDiff *InstanceDiff
+ PostDiffReturn HookAction
+ PostDiffError error
+
+ PreProvisionResourceCalled bool
+ PreProvisionResourceInfo *InstanceInfo
+ PreProvisionInstanceState *InstanceState
+ PreProvisionResourceReturn HookAction
+ PreProvisionResourceError error
+
+ PostProvisionResourceCalled bool
+ PostProvisionResourceInfo *InstanceInfo
+ PostProvisionInstanceState *InstanceState
+ PostProvisionResourceReturn HookAction
+ PostProvisionResourceError error
+
+ PreProvisionCalled bool
+ PreProvisionInfo *InstanceInfo
+ PreProvisionProvisionerId string
+ PreProvisionReturn HookAction
+ PreProvisionError error
+
+ PostProvisionCalled bool
+ PostProvisionInfo *InstanceInfo
+ PostProvisionProvisionerId string
+ PostProvisionErrorArg error
+ PostProvisionReturn HookAction
+ PostProvisionError error
+
+ ProvisionOutputCalled bool
+ ProvisionOutputInfo *InstanceInfo
+ ProvisionOutputProvisionerId string
+ ProvisionOutputMessage string
+
+ PostRefreshCalled bool
+ PostRefreshInfo *InstanceInfo
+ PostRefreshState *InstanceState
+ PostRefreshReturn HookAction
+ PostRefreshError error
+
+ PreRefreshCalled bool
+ PreRefreshInfo *InstanceInfo
+ PreRefreshState *InstanceState
+ PreRefreshReturn HookAction
+ PreRefreshError error
+
+ PreImportStateCalled bool
+ PreImportStateInfo *InstanceInfo
+ PreImportStateId string
+ PreImportStateReturn HookAction
+ PreImportStateError error
+
+ PostImportStateCalled bool
+ PostImportStateInfo *InstanceInfo
+ PostImportStateState []*InstanceState
+ PostImportStateReturn HookAction
+ PostImportStateError error
+
+ PostStateUpdateCalled bool
+ PostStateUpdateState *State
+ PostStateUpdateReturn HookAction
+ PostStateUpdateError error
+}
+
+func (h *MockHook) PreApply(n *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreApplyCalled = true
+ h.PreApplyInfo = n
+ h.PreApplyDiff = d
+ h.PreApplyState = s
+ return h.PreApplyReturn, h.PreApplyError
+}
+
+func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostApplyCalled = true
+ h.PostApplyInfo = n
+ h.PostApplyState = s
+ h.PostApplyError = e
+
+ if h.PostApplyFn != nil {
+ return h.PostApplyFn(n, s, e)
+ }
+
+ return h.PostApplyReturn, h.PostApplyReturnError
+}
+
+func (h *MockHook) PreDiff(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreDiffCalled = true
+ h.PreDiffInfo = n
+ h.PreDiffState = s
+ return h.PreDiffReturn, h.PreDiffError
+}
+
+func (h *MockHook) PostDiff(n *InstanceInfo, d *InstanceDiff) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostDiffCalled = true
+ h.PostDiffInfo = n
+ h.PostDiffDiff = d
+ return h.PostDiffReturn, h.PostDiffError
+}
+
+func (h *MockHook) PreProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreProvisionResourceCalled = true
+ h.PreProvisionResourceInfo = n
+ h.PreProvisionInstanceState = s
+ return h.PreProvisionResourceReturn, h.PreProvisionResourceError
+}
+
+func (h *MockHook) PostProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostProvisionResourceCalled = true
+ h.PostProvisionResourceInfo = n
+ h.PostProvisionInstanceState = s
+ return h.PostProvisionResourceReturn, h.PostProvisionResourceError
+}
+
+func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreProvisionCalled = true
+ h.PreProvisionInfo = n
+ h.PreProvisionProvisionerId = provId
+ return h.PreProvisionReturn, h.PreProvisionError
+}
+
+func (h *MockHook) PostProvision(n *InstanceInfo, provId string, err error) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostProvisionCalled = true
+ h.PostProvisionInfo = n
+ h.PostProvisionProvisionerId = provId
+ h.PostProvisionErrorArg = err
+ return h.PostProvisionReturn, h.PostProvisionError
+}
+
+func (h *MockHook) ProvisionOutput(
+ n *InstanceInfo,
+ provId string,
+ msg string) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.ProvisionOutputCalled = true
+ h.ProvisionOutputInfo = n
+ h.ProvisionOutputProvisionerId = provId
+ h.ProvisionOutputMessage = msg
+}
+
+func (h *MockHook) PreRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreRefreshCalled = true
+ h.PreRefreshInfo = n
+ h.PreRefreshState = s
+ return h.PreRefreshReturn, h.PreRefreshError
+}
+
+func (h *MockHook) PostRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostRefreshCalled = true
+ h.PostRefreshInfo = n
+ h.PostRefreshState = s
+ return h.PostRefreshReturn, h.PostRefreshError
+}
+
+func (h *MockHook) PreImportState(info *InstanceInfo, id string) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreImportStateCalled = true
+ h.PreImportStateInfo = info
+ h.PreImportStateId = id
+ return h.PreImportStateReturn, h.PreImportStateError
+}
+
+func (h *MockHook) PostImportState(info *InstanceInfo, s []*InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostImportStateCalled = true
+ h.PostImportStateInfo = info
+ h.PostImportStateState = s
+ return h.PostImportStateReturn, h.PostImportStateError
+}
+
+func (h *MockHook) PostStateUpdate(s *State) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostStateUpdateCalled = true
+ h.PostStateUpdateState = s
+ return h.PostStateUpdateReturn, h.PostStateUpdateError
+}
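+
+// Illustrative test sketch: because MockHook records every call, a test
+// can run an operation with the hook registered and then assert on it:
+//
+//     h := new(MockHook)
+//     // ... run an apply with h registered as a hook ...
+//     if !h.PreApplyCalled {
+//         t.Fatal("expected PreApply to be called")
+//     }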
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
new file mode 100644
index 00000000..104d0098
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
@@ -0,0 +1,87 @@
+package terraform
+
+import (
+ "sync/atomic"
+)
+
+// stopHook is a private Hook implementation that Terraform uses to
+// signal when to stop or cancel actions.
+type stopHook struct {
+ stop uint32
+}
+
+func (h *stopHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PreProvision(*InstanceInfo, string) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) ProvisionOutput(*InstanceInfo, string, string) {
+}
+
+func (h *stopHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PreImportState(*InstanceInfo, string) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostStateUpdate(*State) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) hook() (HookAction, error) {
+ if h.Stopped() {
+ return HookActionHalt, nil
+ }
+
+ return HookActionContinue, nil
+}
+
+// Reset resets the stop flag so the hook can be reused.
+func (h *stopHook) Reset() {
+ atomic.StoreUint32(&h.stop, 0)
+}
+
+func (h *stopHook) Stop() {
+ atomic.StoreUint32(&h.stop, 1)
+}
+
+func (h *stopHook) Stopped() bool {
+ return atomic.LoadUint32(&h.stop) == 1
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go
new file mode 100644
index 00000000..08959717
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go
@@ -0,0 +1,13 @@
+package terraform
+
+//go:generate stringer -type=InstanceType instancetype.go
+
+// InstanceType is an enum of the various types of instances stored in the State
+type InstanceType int
+
+const (
+ TypeInvalid InstanceType = iota
+ TypePrimary
+ TypeTainted
+ TypeDeposed
+)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
new file mode 100644
index 00000000..f69267cd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT.
+
+package terraform
+
+import "fmt"
+
+const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed"
+
+var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44}
+
+func (i InstanceType) String() string {
+ if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) {
+ return fmt.Sprintf("InstanceType(%d)", i)
+ }
+ return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]]
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
new file mode 100644
index 00000000..855548c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
@@ -0,0 +1,790 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/hil"
+ "github.com/hashicorp/hil/ast"
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/flatmap"
+)
+
+const (
+ // VarEnvPrefix is the prefix of variables that are read from
+ // the environment to set variables here.
+ VarEnvPrefix = "TF_VAR_"
+)
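+
+// For example: setting TF_VAR_region=us-west-2 in the environment
+// supplies a value for the configuration variable "var.region".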
+
+// Interpolater is the structure responsible for determining the values
+// for interpolations such as `aws_instance.foo.bar`.
+type Interpolater struct {
+ Operation walkOperation
+ Meta *ContextMeta
+ Module *module.Tree
+ State *State
+ StateLock *sync.RWMutex
+ VariableValues map[string]interface{}
+ VariableValuesLock *sync.Mutex
+}
+
+// InterpolationScope is the current scope of execution. This is required
+// since some variables which are interpolated are dependent on what we're
+// operating on and where we are.
+type InterpolationScope struct {
+ Path []string
+ Resource *Resource
+}
+
+// Values returns the values for all the variables in the given map.
+func (i *Interpolater) Values(
+ scope *InterpolationScope,
+ vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) {
+ if scope == nil {
+ scope = &InterpolationScope{}
+ }
+
+ result := make(map[string]ast.Variable, len(vars))
+
+ // Copy the default variables
+ if i.Module != nil && scope != nil {
+ mod := i.Module
+ if len(scope.Path) > 1 {
+ mod = i.Module.Child(scope.Path[1:])
+ }
+ for _, v := range mod.Config().Variables {
+ // Set default variables
+ if v.Default == nil {
+ continue
+ }
+
+ n := fmt.Sprintf("var.%s", v.Name)
+ variable, err := hil.InterfaceToVariable(v.Default)
+ if err != nil {
+ return nil, fmt.Errorf("invalid default map value for %s: %v", v.Name, v.Default)
+ }
+
+ result[n] = variable
+ }
+ }
+
+ for n, rawV := range vars {
+ var err error
+ switch v := rawV.(type) {
+ case *config.CountVariable:
+ err = i.valueCountVar(scope, n, v, result)
+ case *config.ModuleVariable:
+ err = i.valueModuleVar(scope, n, v, result)
+ case *config.PathVariable:
+ err = i.valuePathVar(scope, n, v, result)
+ case *config.ResourceVariable:
+ err = i.valueResourceVar(scope, n, v, result)
+ case *config.SelfVariable:
+ err = i.valueSelfVar(scope, n, v, result)
+ case *config.SimpleVariable:
+ err = i.valueSimpleVar(scope, n, v, result)
+ case *config.TerraformVariable:
+ err = i.valueTerraformVar(scope, n, v, result)
+ case *config.UserVariable:
+ err = i.valueUserVar(scope, n, v, result)
+ default:
+ err = fmt.Errorf("%s: unknown variable type: %T", n, rawV)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return result, nil
+}
+
+func (i *Interpolater) valueCountVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.CountVariable,
+ result map[string]ast.Variable) error {
+ switch v.Type {
+ case config.CountValueIndex:
+ if scope.Resource == nil {
+ return fmt.Errorf("%s: count.index is only valid within resources", n)
+ }
+ result[n] = ast.Variable{
+ Value: scope.Resource.CountIndex,
+ Type: ast.TypeInt,
+ }
+ return nil
+ default:
+ return fmt.Errorf("%s: unknown count type: %#v", n, v.Type)
+ }
+}
+
+func unknownVariable() ast.Variable {
+ return ast.Variable{
+ Type: ast.TypeUnknown,
+ Value: config.UnknownVariableValue,
+ }
+}
+
+func unknownValue() string {
+ return hil.UnknownValue
+}
+
+func (i *Interpolater) valueModuleVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.ModuleVariable,
+ result map[string]ast.Variable) error {
+
+ // Build the path to the child module we want
+ path := make([]string, len(scope.Path), len(scope.Path)+1)
+ copy(path, scope.Path)
+ path = append(path, v.Name)
+
+ // Grab the lock so that if other interpolations are running or
+ // state is being modified, we'll be safe.
+ i.StateLock.RLock()
+ defer i.StateLock.RUnlock()
+
+ // Get the module where we're looking for the value
+ mod := i.State.ModuleByPath(path)
+ if mod == nil {
+ // If the module doesn't exist, then we can return an empty string.
+ // This happens usually only in Refresh() when we haven't populated
+ // a state. During validation, we semantically verify that all
+ // modules reference other modules, and graph ordering should
+		// ensure that the module is in the state, so reaching this
+		// point otherwise indicates a bug.
+ result[n] = unknownVariable()
+
+ // During apply this is always an error
+ if i.Operation == walkApply {
+ return fmt.Errorf(
+ "Couldn't find module %q for var: %s",
+ v.Name, v.FullKey())
+ }
+ } else {
+ // Get the value from the outputs
+ if outputState, ok := mod.Outputs[v.Field]; ok {
+ output, err := hil.InterfaceToVariable(outputState.Value)
+ if err != nil {
+ return err
+ }
+ result[n] = output
+ } else {
+ // Same reasons as the comment above.
+ result[n] = unknownVariable()
+
+ // During apply this is always an error
+ if i.Operation == walkApply {
+ return fmt.Errorf(
+ "Couldn't find output %q for module var: %s",
+ v.Field, v.FullKey())
+ }
+ }
+ }
+
+ return nil
+}
+
+func (i *Interpolater) valuePathVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.PathVariable,
+ result map[string]ast.Variable) error {
+ switch v.Type {
+ case config.PathValueCwd:
+ wd, err := os.Getwd()
+ if err != nil {
+ return fmt.Errorf(
+ "Couldn't get cwd for var %s: %s",
+ v.FullKey(), err)
+ }
+
+ result[n] = ast.Variable{
+ Value: wd,
+ Type: ast.TypeString,
+ }
+ case config.PathValueModule:
+ if t := i.Module.Child(scope.Path[1:]); t != nil {
+ result[n] = ast.Variable{
+ Value: t.Config().Dir,
+ Type: ast.TypeString,
+ }
+ }
+ case config.PathValueRoot:
+ result[n] = ast.Variable{
+ Value: i.Module.Config().Dir,
+ Type: ast.TypeString,
+ }
+ default:
+ return fmt.Errorf("%s: unknown path type: %#v", n, v.Type)
+ }
+
+ return nil
+
+}
+
+func (i *Interpolater) valueResourceVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.ResourceVariable,
+ result map[string]ast.Variable) error {
+	// If we're computing all dynamic fields, then resource vars count
+	// as dynamic too and we mark them as computed.
+ if i.Operation == walkValidate {
+ result[n] = unknownVariable()
+ return nil
+ }
+
+ var variable *ast.Variable
+ var err error
+
+ if v.Multi && v.Index == -1 {
+ variable, err = i.computeResourceMultiVariable(scope, v)
+ } else {
+ variable, err = i.computeResourceVariable(scope, v)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ if variable == nil {
+ // During the input walk we tolerate missing variables because
+ // we haven't yet had a chance to refresh state, so dynamic data may
+ // not yet be complete.
+ // If it truly is missing, we'll catch it on a later walk.
+ // This applies only to graph nodes that interpolate during the
+ // config walk, e.g. providers.
+ if i.Operation == walkInput || i.Operation == walkRefresh {
+ result[n] = unknownVariable()
+ return nil
+ }
+
+ return fmt.Errorf("variable %q is nil, but no error was reported", v.Name)
+ }
+
+ result[n] = *variable
+ return nil
+}
+
+func (i *Interpolater) valueSelfVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.SelfVariable,
+ result map[string]ast.Variable) error {
+ if scope == nil || scope.Resource == nil {
+ return fmt.Errorf(
+ "%s: invalid scope, self variables are only valid on resources", n)
+ }
+
+ rv, err := config.NewResourceVariable(fmt.Sprintf(
+ "%s.%s.%d.%s",
+ scope.Resource.Type,
+ scope.Resource.Name,
+ scope.Resource.CountIndex,
+ v.Field))
+ if err != nil {
+ return err
+ }
+
+ return i.valueResourceVar(scope, n, rv, result)
+}
+
+func (i *Interpolater) valueSimpleVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.SimpleVariable,
+ result map[string]ast.Variable) error {
+ // This error message includes some information for people who
+ // relied on this for their template_file data sources. We should
+ // remove this at some point but there isn't any rush.
+ return fmt.Errorf(
+ "invalid variable syntax: %q. Did you mean 'var.%s'? If this is part of inline `template` parameter\n"+
+ "then you must escape the interpolation with two dollar signs. For\n"+
+ "example: ${a} becomes $${a}.",
+ n, n)
+}
+
+func (i *Interpolater) valueTerraformVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.TerraformVariable,
+ result map[string]ast.Variable) error {
+ if v.Field != "env" {
+ return fmt.Errorf(
+ "%s: only supported key for 'terraform.X' interpolations is 'env'", n)
+ }
+
+ if i.Meta == nil {
+ return fmt.Errorf(
+ "%s: internal error: nil Meta. Please report a bug.", n)
+ }
+
+ result[n] = ast.Variable{Type: ast.TypeString, Value: i.Meta.Env}
+ return nil
+}
+
+func (i *Interpolater) valueUserVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.UserVariable,
+ result map[string]ast.Variable) error {
+ i.VariableValuesLock.Lock()
+ defer i.VariableValuesLock.Unlock()
+ val, ok := i.VariableValues[v.Name]
+ if ok {
+ varValue, err := hil.InterfaceToVariable(val)
+ if err != nil {
+ return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s",
+ v.Name, val, err)
+ }
+ result[n] = varValue
+ return nil
+ }
+
+ if _, ok := result[n]; !ok && i.Operation == walkValidate {
+ result[n] = unknownVariable()
+ return nil
+ }
+
+ // Look up if we have any variables with this prefix because
+ // those are map overrides. Include those.
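+	// For example (illustrative): a value stored under "amis.us-east-1"
+	// overrides the "us-east-1" key of the map variable "var.amis".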
+ for k, val := range i.VariableValues {
+ if strings.HasPrefix(k, v.Name+".") {
+ keyComponents := strings.Split(k, ".")
+ overrideKey := keyComponents[len(keyComponents)-1]
+
+ mapInterface, ok := result["var."+v.Name]
+ if !ok {
+ return fmt.Errorf("override for non-existent variable: %s", v.Name)
+ }
+
+ mapVariable := mapInterface.Value.(map[string]ast.Variable)
+
+ varValue, err := hil.InterfaceToVariable(val)
+ if err != nil {
+ return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s",
+ v.Name, val, err)
+ }
+ mapVariable[overrideKey] = varValue
+ }
+ }
+
+ return nil
+}
+
+func (i *Interpolater) computeResourceVariable(
+ scope *InterpolationScope,
+ v *config.ResourceVariable) (*ast.Variable, error) {
+ id := v.ResourceId()
+ if v.Multi {
+ id = fmt.Sprintf("%s.%d", id, v.Index)
+ }
+
+ i.StateLock.RLock()
+ defer i.StateLock.RUnlock()
+
+ unknownVariable := unknownVariable()
+
+ // These variables must be declared early because of the use of GOTO
+ var isList bool
+ var isMap bool
+
+ // Get the information about this resource variable, and verify
+ // that it exists and such.
+ module, cr, err := i.resourceVariableInfo(scope, v)
+ if err != nil {
+ return nil, err
+ }
+
+ // If we're requesting "count" its a special variable that we grab
+ // directly from the config itself.
+ if v.Field == "count" {
+ var count int
+ if cr != nil {
+ count, err = cr.Count()
+ } else {
+ count, err = i.resourceCountMax(module, cr, v)
+ }
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading %s count: %s",
+ v.ResourceId(),
+ err)
+ }
+
+ return &ast.Variable{Type: ast.TypeInt, Value: count}, nil
+ }
+
+ // Get the resource out from the state. We know the state exists
+ // at this point and if there is a state, we expect there to be a
+ // resource with the given name.
+ var r *ResourceState
+ if module != nil && len(module.Resources) > 0 {
+ var ok bool
+ r, ok = module.Resources[id]
+ if !ok && v.Multi && v.Index == 0 {
+ r, ok = module.Resources[v.ResourceId()]
+ }
+ if !ok {
+ r = nil
+ }
+ }
+ if r == nil || r.Primary == nil {
+ if i.Operation == walkApply || i.Operation == walkPlan {
+ return nil, fmt.Errorf(
+ "Resource '%s' not found for variable '%s'",
+ v.ResourceId(),
+ v.FullKey())
+ }
+
+ // If we have no module in the state yet or count, return empty.
+ // NOTE(@mitchellh): I actually don't know why this is here. During
+ // a refactor I kept this here to maintain the same behavior, but
+	// I'm not sure why it's here.
+ if module == nil || len(module.Resources) == 0 {
+ return nil, nil
+ }
+
+ goto MISSING
+ }
+
+ if attr, ok := r.Primary.Attributes[v.Field]; ok {
+ v, err := hil.InterfaceToVariable(attr)
+ return &v, err
+ }
+
+ // computed list or map attribute
+ _, isList = r.Primary.Attributes[v.Field+".#"]
+ _, isMap = r.Primary.Attributes[v.Field+".%"]
+ if isList || isMap {
+ variable, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes)
+ return &variable, err
+ }
+
+ // At apply time, we can't do the "maybe has it" check below
+ // that we need for plans since parent elements might be computed.
+ // Therefore, it is an error and we're missing the key.
+ //
+ // TODO: test by creating a state and configuration that is referencing
+ // a non-existent variable "foo.bar" where the state only has "foo"
+ // and verify plan works, but apply doesn't.
+ if i.Operation == walkApply || i.Operation == walkDestroy {
+ goto MISSING
+ }
+
+ // We didn't find the exact field, so lets separate the dots
+ // and see if anything along the way is a computed set. i.e. if
+ // we have "foo.0.bar" as the field, check to see if "foo" is
+ // a computed list. If so, then the whole thing is computed.
+ if parts := strings.Split(v.Field, "."); len(parts) > 1 {
+ for i := 1; i < len(parts); i++ {
+			// Lists and sets produce a ".#" count key
+ key := fmt.Sprintf("%s.#", strings.Join(parts[:i], "."))
+ if attr, ok := r.Primary.Attributes[key]; ok {
+ v, err := hil.InterfaceToVariable(attr)
+ return &v, err
+ }
+
+			// Maps produce the bare prefix key
+			key = strings.Join(parts[:i], ".")
+ if attr, ok := r.Primary.Attributes[key]; ok {
+ v, err := hil.InterfaceToVariable(attr)
+ return &v, err
+ }
+ }
+ }
+
+MISSING:
+ // Validation for missing interpolations should happen at a higher
+ // semantic level. If we reached this point and don't have variables,
+ // just return the computed value.
+ if scope == nil && scope.Resource == nil {
+ return &unknownVariable, nil
+ }
+
+ // If the operation is refresh, it isn't an error for a value to
+ // be unknown. Instead, we return that the value is computed so
+ // that the graph can continue to refresh other nodes. It doesn't
+ // matter because the config isn't interpolated anyways.
+ //
+ // For a Destroy, we're also fine with computed values, since our goal is
+ // only to get destroy nodes for existing resources.
+ //
+ // For an input walk, computed values are okay to return because we're only
+ // looking for missing variables to prompt the user for.
+ if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput {
+ return &unknownVariable, nil
+ }
+
+ return nil, fmt.Errorf(
+ "Resource '%s' does not have attribute '%s' "+
+ "for variable '%s'",
+ id,
+ v.Field,
+ v.FullKey())
+}
+
+func (i *Interpolater) computeResourceMultiVariable(
+ scope *InterpolationScope,
+ v *config.ResourceVariable) (*ast.Variable, error) {
+ i.StateLock.RLock()
+ defer i.StateLock.RUnlock()
+
+ unknownVariable := unknownVariable()
+
+ // If we're only looking for input, we don't need to expand a
+ // multi-variable. This prevents us from encountering things that should be
+ // known but aren't because the state has yet to be refreshed.
+ if i.Operation == walkInput {
+ return &unknownVariable, nil
+ }
+
+ // Get the information about this resource variable, and verify
+ // that it exists and such.
+ module, cr, err := i.resourceVariableInfo(scope, v)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the keys for all the resources that are created for this resource
+ countMax, err := i.resourceCountMax(module, cr, v)
+ if err != nil {
+ return nil, err
+ }
+
+ // If count is zero, we return an empty list
+ if countMax == 0 {
+ return &ast.Variable{Type: ast.TypeList, Value: []ast.Variable{}}, nil
+ }
+
+ // If we have no module in the state yet or count, return unknown
+ if module == nil || len(module.Resources) == 0 {
+ return &unknownVariable, nil
+ }
+
+ var values []interface{}
+ for idx := 0; idx < countMax; idx++ {
+ id := fmt.Sprintf("%s.%d", v.ResourceId(), idx)
+
+		// The first instance may be stored without a trailing index. We try
+		// both here, but if a value without a trailing index is found we
+		// prefer that. This choice is for legacy reasons: older versions of
+		// TF preferred it.
+ if id == v.ResourceId()+".0" {
+ potential := v.ResourceId()
+ if _, ok := module.Resources[potential]; ok {
+ id = potential
+ }
+ }
+
+ r, ok := module.Resources[id]
+ if !ok {
+ continue
+ }
+
+ if r.Primary == nil {
+ continue
+ }
+
+ if singleAttr, ok := r.Primary.Attributes[v.Field]; ok {
+ if singleAttr == config.UnknownVariableValue {
+ return &unknownVariable, nil
+ }
+
+ values = append(values, singleAttr)
+ continue
+ }
+
+ // computed list or map attribute
+ _, isList := r.Primary.Attributes[v.Field+".#"]
+ _, isMap := r.Primary.Attributes[v.Field+".%"]
+ if !(isList || isMap) {
+ continue
+ }
+ multiAttr, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes)
+ if err != nil {
+ return nil, err
+ }
+
+ if multiAttr == unknownVariable {
+ return &unknownVariable, nil
+ }
+
+ values = append(values, multiAttr)
+ }
+
+ if len(values) == 0 {
+ // If the operation is refresh, it isn't an error for a value to
+ // be unknown. Instead, we return that the value is computed so
+ // that the graph can continue to refresh other nodes. It doesn't
+ // matter because the config isn't interpolated anyways.
+ //
+ // For a Destroy, we're also fine with computed values, since our goal is
+ // only to get destroy nodes for existing resources.
+ //
+ // For an input walk, computed values are okay to return because we're only
+ // looking for missing variables to prompt the user for.
+ if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput {
+ return &unknownVariable, nil
+ }
+
+ return nil, fmt.Errorf(
+ "Resource '%s' does not have attribute '%s' "+
+ "for variable '%s'",
+ v.ResourceId(),
+ v.Field,
+ v.FullKey())
+ }
+
+ variable, err := hil.InterfaceToVariable(values)
+ return &variable, err
+}
+
+func (i *Interpolater) interpolateComplexTypeAttribute(
+ resourceID string,
+ attributes map[string]string) (ast.Variable, error) {
+
+ // We can now distinguish between lists and maps in state by the count field:
+ // - lists (and by extension, sets) use the traditional .# notation
+ // - maps use the newer .% notation
+ // Consequently here we can decide how to deal with the keys appropriately
+	// based on whether the type is a map or a list.
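+	// For example (illustrative): attributes {"subnets.#": "2",
+	// "subnets.0": "a", "subnets.1": "b"} expand to the list ["a", "b"].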
+ if lengthAttr, isList := attributes[resourceID+".#"]; isList {
+ log.Printf("[DEBUG] Interpolating computed list element attribute %s (%s)",
+ resourceID, lengthAttr)
+
+ // In Terraform's internal dotted representation of list-like attributes, the
+ // ".#" count field is marked as unknown to indicate "this whole list is
+ // unknown". We must honor that meaning here so computed references can be
+ // treated properly during the plan phase.
+ if lengthAttr == config.UnknownVariableValue {
+ return unknownVariable(), nil
+ }
+
+ expanded := flatmap.Expand(attributes, resourceID)
+ return hil.InterfaceToVariable(expanded)
+ }
+
+ if lengthAttr, isMap := attributes[resourceID+".%"]; isMap {
+ log.Printf("[DEBUG] Interpolating computed map element attribute %s (%s)",
+ resourceID, lengthAttr)
+
+		// In Terraform's internal dotted representation of map attributes, the
+		// ".%" count field is marked as unknown to indicate "this whole map is
+		// unknown". We must honor that meaning here so computed references can be
+ // treated properly during the plan phase.
+ if lengthAttr == config.UnknownVariableValue {
+ return unknownVariable(), nil
+ }
+
+ expanded := flatmap.Expand(attributes, resourceID)
+ return hil.InterfaceToVariable(expanded)
+ }
+
+ return ast.Variable{}, fmt.Errorf("No complex type %s found", resourceID)
+}
+
+func (i *Interpolater) resourceVariableInfo(
+ scope *InterpolationScope,
+ v *config.ResourceVariable) (*ModuleState, *config.Resource, error) {
+ // Get the module tree that contains our current path. This is
+ // either the current module (path is empty) or a child.
+ modTree := i.Module
+ if len(scope.Path) > 1 {
+ modTree = i.Module.Child(scope.Path[1:])
+ }
+
+ // Get the resource from the configuration so we can verify
+ // that the resource is in the configuration and so we can access
+ // the configuration if we need to.
+ var cr *config.Resource
+ for _, r := range modTree.Config().Resources {
+ if r.Id() == v.ResourceId() {
+ cr = r
+ break
+ }
+ }
+
+ // Get the relevant module
+ module := i.State.ModuleByPath(scope.Path)
+ return module, cr, nil
+}
+
+func (i *Interpolater) resourceCountMax(
+ ms *ModuleState,
+ cr *config.Resource,
+ v *config.ResourceVariable) (int, error) {
+ id := v.ResourceId()
+
+ // If we're NOT applying, then we assume we can read the count
+ // from the state. Plan and so on may not have any state yet so
+ // we do a full interpolation.
+ if i.Operation != walkApply {
+ if cr == nil {
+ return 0, nil
+ }
+
+ count, err := cr.Count()
+ if err != nil {
+ return 0, err
+ }
+
+ return count, nil
+ }
+
+ // We need to determine the list of resource keys to get values from.
+ // This needs to be sorted so the order is deterministic. We used to
+ // use "cr.Count()" but that doesn't work if the count is interpolated
+ // and we can't guarantee that so we instead depend on the state.
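+	// For example (illustrative): with id "aws_instance.foo" and state
+	// keys "aws_instance.foo" and "aws_instance.foo.3", the indexes are
+	// 0 and 3, so max is 3 and the count returned is 4.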
+ max := -1
+	for k := range ms.Resources {
+ // Get the index number for this resource
+ index := ""
+ if k == id {
+			// If the key is the id, then it's just 0 (no explicit index)
+ index = "0"
+ } else if strings.HasPrefix(k, id+".") {
+ // Grab the index number out of the state
+ index = k[len(id+"."):]
+ if idx := strings.IndexRune(index, '.'); idx >= 0 {
+ index = index[:idx]
+ }
+ }
+
+ // If there was no index then this resource didn't match
+ // the one we're looking for, exit.
+ if index == "" {
+ continue
+ }
+
+ // Turn the index into an int
+ raw, err := strconv.ParseInt(index, 0, 0)
+ if err != nil {
+ return 0, fmt.Errorf(
+ "%s: error parsing index %q as int: %s",
+ id, index, err)
+ }
+
+		// Keep track of this index if it's the max
+		if cur := int(raw); cur > max {
+			max = cur
+		}
+ }
+
+ // If we never found any matching resources in the state, we
+ // have zero.
+ if max == -1 {
+ return 0, nil
+ }
+
+ // The result value is "max+1" because we're returning the
+ // max COUNT, not the max INDEX, and we zero-index.
+ return max + 1, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
new file mode 100644
index 00000000..bd32c79f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
@@ -0,0 +1,14 @@
+package terraform
+
+// NodeCountBoundary fixes any "count boundarie" in the state: resources
+// that are named "foo.0" when they should be named "foo"
+type NodeCountBoundary struct{}
+
+func (n *NodeCountBoundary) Name() string {
+ return "meta.count-boundary (count boundary fixup)"
+}
+
+// GraphNodeEvalable
+func (n *NodeCountBoundary) EvalTree() EvalNode {
+ return &EvalCountFixZeroOneBoundaryGlobal{}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
new file mode 100644
index 00000000..e32cea88
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
@@ -0,0 +1,22 @@
+package terraform
+
+// NodeDestroyableDataResource represents a resource that is "plannable":
+// it is ready to be planned in order to create a diff.
+type NodeDestroyableDataResource struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodeDestroyableDataResource) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Just destroy it.
+ var state *InstanceState
+ return &EvalWriteState{
+ Name: stateId,
+ State: &state, // state is nil here
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
new file mode 100644
index 00000000..d504c892
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
@@ -0,0 +1,198 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// NodeRefreshableDataResource represents a resource that is "plannable":
+// it is ready to be planned in order to create a diff.
+type NodeRefreshableDataResource struct {
+ *NodeAbstractCountResource
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+ // Grab the state which we read
+ state, lock := ctx.State()
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Expand the resource count which must be available by now from EvalTree
+ count, err := n.Config.Count()
+ if err != nil {
+ return nil, err
+ }
+
+ // The concrete resource factory we'll use
+ concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+ // Add the config and state since we don't do that via transforms
+ a.Config = n.Config
+
+ return &NodeRefreshableDataResourceInstance{
+ NodeAbstractResource: a,
+ }
+ }
+
+ // Start creating the steps
+ steps := []GraphTransformer{
+ // Expand the count.
+ &ResourceCountTransformer{
+ Concrete: concreteResource,
+ Count: count,
+ Addr: n.ResourceAddr(),
+ },
+
+ // Attach the state
+ &AttachStateTransformer{State: state},
+
+ // Targeting
+ &TargetsTransformer{ParsedTargets: n.Targets},
+
+ // Connect references so ordering is correct
+ &ReferenceTransformer{},
+
+ // Make sure there is a single root
+ &RootTransformer{},
+ }
+
+ // Build the graph
+ b := &BasicGraphBuilder{
+ Steps: steps,
+ Validate: true,
+ Name: "NodeRefreshableDataResource",
+ }
+
+ return b.Build(ctx.Path())
+}
+
+// NodeRefreshableDataResourceInstance represents a _single_ resource instance
+// that is refreshable.
+type NodeRefreshableDataResourceInstance struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ }
+
+ // Get the state if we have it, if not we build it
+ rs := n.ResourceState
+ if rs == nil {
+ rs = &ResourceState{}
+ }
+
+ // If the config isn't empty we update the state
+ if n.Config != nil {
+ rs = &ResourceState{
+ Type: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: n.StateReferences(),
+ }
+ }
+
+ // Build the resource for eval
+ resource := &Resource{
+ Name: addr.Name,
+ Type: addr.Type,
+ CountIndex: addr.Index,
+ }
+ if resource.CountIndex < 0 {
+ resource.CountIndex = 0
+ }
+
+	// Declare a bunch of variables that are used for state during
+	// evaluation. Most of these are written to by address below.
+ var config *ResourceConfig
+ var diff *InstanceDiff
+ var provider ResourceProvider
+ var state *InstanceState
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ // Always destroy the existing state first, since we must
+ // make sure that values from a previous read will not
+ // get interpolated if we end up needing to defer our
+ // loading until apply time.
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: rs.Type,
+ Provider: rs.Provider,
+ Dependencies: rs.Dependencies,
+ State: &state, // state is nil here
+ },
+
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &config,
+ },
+
+ // The rest of this pass can proceed only if there are no
+ // computed values in our config.
+ // (If there are, we'll deal with this during the plan and
+ // apply phases.)
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if config.ComputedKeys != nil && len(config.ComputedKeys) > 0 {
+ return true, EvalEarlyExitError{}
+ }
+
+ // If the config explicitly has a depends_on for this
+ // data source, assume the intention is to prevent
+ // refreshing ahead of that dependency.
+ if len(n.Config.DependsOn) > 0 {
+ return true, EvalEarlyExitError{}
+ }
+
+ return true, nil
+ },
+
+ Then: EvalNoop{},
+ },
+
+ // The remainder of this pass is the same as running
+ // a "plan" pass immediately followed by an "apply" pass,
+ // populating the state early so it'll be available to
+ // provider configurations that need this data during
+ // refresh/plan.
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+
+ &EvalReadDataDiff{
+ Info: info,
+ Config: &config,
+ Provider: &provider,
+ Output: &diff,
+ OutputState: &state,
+ },
+
+ &EvalReadDataApply{
+ Info: info,
+ Diff: &diff,
+ Provider: &provider,
+ Output: &state,
+ },
+
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: rs.Type,
+ Provider: rs.Provider,
+ Dependencies: rs.Dependencies,
+ State: &state,
+ },
+
+ &EvalUpdateStateHook{},
+ },
+ }
+}
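+
+// Editor's note (summary, not upstream text): the sequence above reads the
+// data source during refresh (a diff immediately followed by an apply) only
+// when the interpolated config has no computed keys and no depends_on;
+// otherwise the early exit defers the read until the plan/apply walk.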
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
new file mode 100644
index 00000000..319df1e3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
@@ -0,0 +1,29 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// NodeDestroyableModuleVariable represents a module variable during destroy.
+type NodeDestroyableModuleVariable struct {
+ PathValue []string
+}
+
+func (n *NodeDestroyableModuleVariable) Name() string {
+ result := "plan-destroy"
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeDestroyableModuleVariable) Path() []string {
+ return n.PathValue
+}
+
+// GraphNodeEvalable
+func (n *NodeDestroyableModuleVariable) EvalTree() EvalNode {
+ return &EvalDiffDestroyModule{Path: n.PathValue}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
new file mode 100644
index 00000000..13fe8fc3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
@@ -0,0 +1,125 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// NodeApplyableModuleVariable represents a module variable input during
+// the apply step.
+type NodeApplyableModuleVariable struct {
+ PathValue []string
+ Config *config.Variable // Config is the var in the config
+ Value *config.RawConfig // Value is the value that is set
+
+ Module *module.Tree // Antiquated, want to remove
+}
+
+func (n *NodeApplyableModuleVariable) Name() string {
+ result := fmt.Sprintf("var.%s", n.Config.Name)
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeApplyableModuleVariable) Path() []string {
+ // We execute in the parent scope (above our own module) so that
+ // we can access the proper interpolations.
+ if len(n.PathValue) > 2 {
+ return n.PathValue[:len(n.PathValue)-1]
+ }
+
+ return rootModulePath
+}
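+
+// Editor's note (worked example, not upstream text): for PathValue
+// ["root", "child", "grandchild"], Path returns ["root", "child"]; for
+// ["root", "child"] it returns rootModulePath, since the variable's value
+// is interpolated in the parent module's scope.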
+
+// RemovableIfNotTargeted
+func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool {
+ // We need to add this so that this node will be removed if
+ // it isn't targeted or a dependency of a target.
+ return true
+}
+
+// GraphNodeReferenceGlobal
+func (n *NodeApplyableModuleVariable) ReferenceGlobal() bool {
+ // We have to create fully qualified references because we cross
+ // boundaries here: our ReferenceableName is in one path and our
+ // References are from another path.
+ return true
+}
+
+// GraphNodeReferenceable
+func (n *NodeApplyableModuleVariable) ReferenceableName() []string {
+ return []string{n.Name()}
+}
+
+// GraphNodeReferencer
+func (n *NodeApplyableModuleVariable) References() []string {
+ // If we have no value set, we depend on nothing
+ if n.Value == nil {
+ return nil
+ }
+
+ // Can't depend on anything if we're in the root
+ if len(n.PathValue) < 2 {
+ return nil
+ }
+
+ // Otherwise, we depend on anything that is in our value, but
+ // specifically in the namespace of the parent path.
+ // Create the prefix based on the path
+ var prefix string
+ if p := n.Path(); len(p) > 0 {
+ prefix = modulePrefixStr(p)
+ }
+
+ result := ReferencesFromConfig(n.Value)
+ return modulePrefixList(result, prefix)
+}
+
+// GraphNodeEvalable
+func (n *NodeApplyableModuleVariable) EvalTree() EvalNode {
+ // If we have no value, do nothing
+ if n.Value == nil {
+ return &EvalNoop{}
+ }
+
+ // Otherwise, interpolate the value of this variable and set it
+ // within the variables mapping.
+ var config *ResourceConfig
+ variables := make(map[string]interface{})
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalInterpolate{
+ Config: n.Value,
+ Output: &config,
+ },
+
+ &EvalVariableBlock{
+ Config: &config,
+ VariableValues: variables,
+ },
+
+ &EvalCoerceMapVariable{
+ Variables: variables,
+ ModulePath: n.PathValue,
+ ModuleTree: n.Module,
+ },
+
+ &EvalTypeCheckVariable{
+ Variables: variables,
+ ModulePath: n.PathValue,
+ ModuleTree: n.Module,
+ },
+
+ &EvalSetVariables{
+ Module: &n.PathValue[len(n.PathValue)-1],
+ Variables: variables,
+ },
+ },
+ }
+}
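+
+// Editor's note (summary, not upstream text): the sequence above
+// interpolates the raw value, collects it into a variables map, coerces
+// declared map types, type-checks the result against the variable
+// declaration, and finally publishes it into the child module's scope.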
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
new file mode 100644
index 00000000..e28e6f02
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
@@ -0,0 +1,76 @@
+package terraform
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeApplyableOutput represents an output that is "applyable":
+// it is ready to be applied.
+type NodeApplyableOutput struct {
+ PathValue []string
+ Config *config.Output // Config is the output in the config
+}
+
+func (n *NodeApplyableOutput) Name() string {
+ result := fmt.Sprintf("output.%s", n.Config.Name)
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeApplyableOutput) Path() []string {
+ return n.PathValue
+}
+
+// RemovableIfNotTargeted
+func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool {
+ // We need to add this so that this node will be removed if
+ // it isn't targeted or a dependency of a target.
+ return true
+}
+
+// GraphNodeReferenceable
+func (n *NodeApplyableOutput) ReferenceableName() []string {
+ name := fmt.Sprintf("output.%s", n.Config.Name)
+ return []string{name}
+}
+
+// GraphNodeReferencer
+func (n *NodeApplyableOutput) References() []string {
+ var result []string
+ result = append(result, n.Config.DependsOn...)
+ result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
+ for _, v := range result {
+ split := strings.Split(v, "/")
+ for i, s := range split {
+ split[i] = s + ".destroy"
+ }
+
+ result = append(result, strings.Join(split, "/"))
+ }
+
+ return result
+}
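+
+// destroyRefsExample is an editor's sketch (not upstream code) showing the
+// string transformation in References above: every "/"-separated segment of
+// a reference gains a ".destroy" suffix, so outputs also wait on the
+// destroy side of their dependencies.
+func destroyRefsExample() string {
+ split := strings.Split("aws_instance.web", "/")
+ for i, s := range split {
+ split[i] = s + ".destroy"
+ }
+ return strings.Join(split, "/") // "aws_instance.web.destroy"
+}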
+
+// GraphNodeEvalable
+func (n *NodeApplyableOutput) EvalTree() EvalNode {
+ return &EvalOpFilter{
+ Ops: []walkOperation{walkRefresh, walkPlan, walkApply,
+ walkDestroy, walkInput, walkValidate},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalWriteOutput{
+ Name: n.Config.Name,
+ Sensitive: n.Config.Sensitive,
+ Value: n.Config.RawConfig,
+ },
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
new file mode 100644
index 00000000..636a15df
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
@@ -0,0 +1,35 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// NodeOutputOrphan represents an output that is an orphan.
+type NodeOutputOrphan struct {
+ OutputName string
+ PathValue []string
+}
+
+func (n *NodeOutputOrphan) Name() string {
+ result := fmt.Sprintf("output.%s (orphan)", n.OutputName)
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeOutputOrphan) Path() []string {
+ return n.PathValue
+}
+
+// GraphNodeEvalable
+func (n *NodeOutputOrphan) EvalTree() EvalNode {
+ return &EvalOpFilter{
+ Ops: []walkOperation{walkRefresh, walkApply, walkDestroy},
+ Node: &EvalDeleteOutput{
+ Name: n.OutputName,
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go
new file mode 100644
index 00000000..8e2c176f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go
@@ -0,0 +1,11 @@
+package terraform
+
+// NodeApplyableProvider represents a provider during an apply.
+type NodeApplyableProvider struct {
+ *NodeAbstractProvider
+}
+
+// GraphNodeEvalable
+func (n *NodeApplyableProvider) EvalTree() EvalNode {
+ return ProviderEvalTree(n.NameValue, n.ProviderConfig())
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
new file mode 100644
index 00000000..6cc83656
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
@@ -0,0 +1,85 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ConcreteProviderNodeFunc is a callback type used to convert an
+// abstract provider to a concrete one of some type.
+type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex
+
+// NodeAbstractProvider represents a provider that has no associated operations.
+// It registers all the common interfaces across operations for providers.
+type NodeAbstractProvider struct {
+ NameValue string
+ PathValue []string
+
+ // The fields below will be automatically set using the Attach
+ // interfaces if you're running those transforms, but also be explicitly
+ // set if you already have that information.
+
+ Config *config.ProviderConfig
+}
+
+func (n *NodeAbstractProvider) Name() string {
+ result := fmt.Sprintf("provider.%s", n.NameValue)
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeAbstractProvider) Path() []string {
+ return n.PathValue
+}
+
+// RemovableIfNotTargeted
+func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool {
+ // We need to add this so that this node will be removed if
+ // it isn't targeted or a dependency of a target.
+ return true
+}
+
+// GraphNodeReferencer
+func (n *NodeAbstractProvider) References() []string {
+ if n.Config == nil {
+ return nil
+ }
+
+ return ReferencesFromConfig(n.Config.RawConfig)
+}
+
+// GraphNodeProvider
+func (n *NodeAbstractProvider) ProviderName() string {
+ return n.NameValue
+}
+
+// GraphNodeProvider
+func (n *NodeAbstractProvider) ProviderConfig() *config.RawConfig {
+ if n.Config == nil {
+ return nil
+ }
+
+ return n.Config.RawConfig
+}
+
+// GraphNodeAttachProvider
+func (n *NodeAbstractProvider) AttachProvider(c *config.ProviderConfig) {
+ n.Config = c
+}
+
+// GraphNodeDotter impl.
+func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+ return &dag.DotNode{
+ Name: name,
+ Attrs: map[string]string{
+ "label": n.Name(),
+ "shape": "diamond",
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
new file mode 100644
index 00000000..25e7e620
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
@@ -0,0 +1,38 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// NodeDisabledProvider represents a provider that is disabled. A disabled
+// provider does nothing. It exists to properly set inheritance information
+// for child providers.
+type NodeDisabledProvider struct {
+ *NodeAbstractProvider
+}
+
+func (n *NodeDisabledProvider) Name() string {
+ return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name())
+}
+
+// GraphNodeEvalable
+func (n *NodeDisabledProvider) EvalTree() EvalNode {
+ var resourceConfig *ResourceConfig
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalInterpolate{
+ Config: n.ProviderConfig(),
+ Output: &resourceConfig,
+ },
+ &EvalBuildProviderConfig{
+ Provider: n.ProviderName(),
+ Config: &resourceConfig,
+ Output: &resourceConfig,
+ },
+ &EvalSetProviderConfig{
+ Provider: n.ProviderName(),
+ Config: &resourceConfig,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
new file mode 100644
index 00000000..bb117c1d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
@@ -0,0 +1,44 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeProvisioner represents a provisioner that has no associated operations.
+// It registers all the common interfaces across operations for provisioners.
+type NodeProvisioner struct {
+ NameValue string
+ PathValue []string
+
+ // The fields below will be automatically set using the Attach
+ // interfaces if you're running those transforms, but also be explicitly
+ // set if you already have that information.
+
+ Config *config.ProviderConfig
+}
+
+func (n *NodeProvisioner) Name() string {
+ result := fmt.Sprintf("provisioner.%s", n.NameValue)
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeProvisioner) Path() []string {
+ return n.PathValue
+}
+
+// GraphNodeProvisioner
+func (n *NodeProvisioner) ProvisionerName() string {
+ return n.NameValue
+}
+
+// GraphNodeEvalable impl.
+func (n *NodeProvisioner) EvalTree() EvalNode {
+ return &EvalInitProvisioner{Name: n.NameValue}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
new file mode 100644
index 00000000..50bb7079
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
@@ -0,0 +1,240 @@
+package terraform
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ConcreteResourceNodeFunc is a callback type used to convert an
+// abstract resource to a concrete one of some type.
+type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex
+
+// GraphNodeResource is implemented by any nodes that represent a resource.
+// The type of operation cannot be assumed, only that this node represents
+// the given resource.
+type GraphNodeResource interface {
+ ResourceAddr() *ResourceAddress
+}
+
+// NodeAbstractResource represents a resource that has no associated
+// operations. It registers all the interfaces for a resource that are
+// common across multiple operation types.
+type NodeAbstractResource struct {
+ Addr *ResourceAddress // Addr is the address for this resource
+
+ // The fields below will be automatically set using the Attach
+ // interfaces if you're running those transforms, but also be explicitly
+ // set if you already have that information.
+
+ Config *config.Resource // Config is the resource in the config
+ ResourceState *ResourceState // ResourceState is the ResourceState for this
+
+ Targets []ResourceAddress // Set from GraphNodeTargetable
+}
+
+func (n *NodeAbstractResource) Name() string {
+ return n.Addr.String()
+}
+
+// GraphNodeSubPath
+func (n *NodeAbstractResource) Path() []string {
+ return n.Addr.Path
+}
+
+// GraphNodeReferenceable
+func (n *NodeAbstractResource) ReferenceableName() []string {
+ // We always are referenceable as "type.name" as long as
+ // we have a config or address. Determine what that value is.
+ var id string
+ if n.Config != nil {
+ id = n.Config.Id()
+ } else if n.Addr != nil {
+ addrCopy := n.Addr.Copy()
+ addrCopy.Path = nil // ReferenceTransformer handles paths
+ addrCopy.Index = -1 // We handle indexes below
+ id = addrCopy.String()
+ } else {
+ // No way to determine our type.name, just return
+ return nil
+ }
+
+ var result []string
+
+ // Always include our own ID. This is primarily for backwards
+ // compatibility with states that didn't yet support the more
+ // specific dep string.
+ result = append(result, id)
+
+ // We also match the splat form used for multi-count access
+ result = append(result, fmt.Sprintf("%s.*", id))
+
+ // We represent either a specific number, or all numbers
+ suffix := "N"
+ if n.Addr != nil {
+ idx := n.Addr.Index
+ if idx == -1 {
+ idx = 0
+ }
+
+ suffix = fmt.Sprintf("%d", idx)
+ }
+ result = append(result, fmt.Sprintf("%s.%s", id, suffix))
+
+ return result
+}
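+
+// referenceableNameExample is an editor's sketch (not upstream code),
+// assuming ResourceAddress.String renders "type.name": a resource
+// "aws_instance.foo" with count index 2 is referenceable as
+// "aws_instance.foo", "aws_instance.foo.*", and "aws_instance.foo.2".
+func referenceableNameExample() []string {
+ n := &NodeAbstractResource{
+ Addr: &ResourceAddress{Type: "aws_instance", Name: "foo", Index: 2},
+ }
+ return n.ReferenceableName()
+}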
+
+// GraphNodeReferencer
+func (n *NodeAbstractResource) References() []string {
+ // If we have a config, that is our source of truth
+ if c := n.Config; c != nil {
+ // Grab all the references
+ var result []string
+ result = append(result, c.DependsOn...)
+ result = append(result, ReferencesFromConfig(c.RawCount)...)
+ result = append(result, ReferencesFromConfig(c.RawConfig)...)
+ for _, p := range c.Provisioners {
+ if p.When == config.ProvisionerWhenCreate {
+ result = append(result, ReferencesFromConfig(p.ConnInfo)...)
+ result = append(result, ReferencesFromConfig(p.RawConfig)...)
+ }
+ }
+
+ return uniqueStrings(result)
+ }
+
+ // If we have state, that is our next source
+ if s := n.ResourceState; s != nil {
+ return s.Dependencies
+ }
+
+ return nil
+}
+
+// StateReferences returns the dependencies to put into the state for
+// this resource.
+func (n *NodeAbstractResource) StateReferences() []string {
+ self := n.ReferenceableName()
+
+ // Determine what our "prefix" is for checking for references to
+ // ourself.
+ addrCopy := n.Addr.Copy()
+ addrCopy.Index = -1
+ selfPrefix := addrCopy.String() + "."
+
+ depsRaw := n.References()
+ deps := make([]string, 0, len(depsRaw))
+ for _, d := range depsRaw {
+ // Ignore any variable dependencies
+ if strings.HasPrefix(d, "var.") {
+ continue
+ }
+
+ // If this has a backup ref, ignore it for now. The old state
+ // file never contained backup refs and I'd rather store the rich
+ // types we add in the future.
+ if idx := strings.IndexRune(d, '/'); idx != -1 {
+ d = d[:idx]
+ }
+
+ // If we're referencing ourself, then ignore it
+ found := false
+ for _, s := range self {
+ if d == s {
+ found = true
+ }
+ }
+ if found {
+ continue
+ }
+
+ // If this is a reference to ourself and a specific index, we keep
+ // it. For example, if this resource is "foo.bar" and the reference
+ // is "foo.bar.0" then we keep it exact. Otherwise, we strip it.
+ if strings.HasSuffix(d, ".0") && !strings.HasPrefix(d, selfPrefix) {
+ d = d[:len(d)-2]
+ }
+
+ // This is sad. The dependencies are currently in the format of
+ // "module.foo.bar" (the full field). This strips the field off.
+ if strings.HasPrefix(d, "module.") {
+ parts := strings.SplitN(d, ".", 3)
+ d = strings.Join(parts[0:2], ".")
+ }
+
+ deps = append(deps, d)
+ }
+
+ return deps
+}
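+
+// stateReferencesExample is an editor's sketch (not upstream code) of the
+// filtering above, with dependencies supplied via state: "var." refs are
+// dropped, backup refs are cut at "/", a trailing ".0" is stripped from
+// non-self references, and module refs shrink to "module.<name>".
+func stateReferencesExample() []string {
+ n := &NodeAbstractResource{
+ Addr: &ResourceAddress{Type: "aws_instance", Name: "web"},
+ ResourceState: &ResourceState{
+ Dependencies: []string{
+ "var.region", // dropped
+ "aws_db_instance.db/destroy", // cut at "/"
+ "aws_eip.ip.0", // ".0" stripped
+ "module.net.subnet_id", // -> "module.net"
+ },
+ },
+ }
+ // Returns ["aws_db_instance.db", "aws_eip.ip", "module.net"]
+ return n.StateReferences()
+}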
+
+// GraphNodeProviderConsumer
+func (n *NodeAbstractResource) ProvidedBy() []string {
+ // If we have a config we prefer that above all else
+ if n.Config != nil {
+ return []string{resourceProvider(n.Config.Type, n.Config.Provider)}
+ }
+
+ // If we have state, then we will use the provider from there
+ if n.ResourceState != nil && n.ResourceState.Provider != "" {
+ return []string{n.ResourceState.Provider}
+ }
+
+ // Use our type
+ return []string{resourceProvider(n.Addr.Type, "")}
+}
+
+// GraphNodeProvisionerConsumer
+func (n *NodeAbstractResource) ProvisionedBy() []string {
+ // If we have no configuration, then we have no provisioners
+ if n.Config == nil {
+ return nil
+ }
+
+ // Build the list of provisioners we need based on the configuration.
+ // It is okay to have duplicates here.
+ result := make([]string, len(n.Config.Provisioners))
+ for i, p := range n.Config.Provisioners {
+ result[i] = p.Type
+ }
+
+ return result
+}
+
+// GraphNodeResource, GraphNodeAttachResourceState
+func (n *NodeAbstractResource) ResourceAddr() *ResourceAddress {
+ return n.Addr
+}
+
+// GraphNodeAddressable, TODO: remove, used by target, should unify
+func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress {
+ return n.ResourceAddr()
+}
+
+// GraphNodeTargetable
+func (n *NodeAbstractResource) SetTargets(targets []ResourceAddress) {
+ n.Targets = targets
+}
+
+// GraphNodeAttachResourceState
+func (n *NodeAbstractResource) AttachResourceState(s *ResourceState) {
+ n.ResourceState = s
+}
+
+// GraphNodeAttachResourceConfig
+func (n *NodeAbstractResource) AttachResourceConfig(c *config.Resource) {
+ n.Config = c
+}
+
+// GraphNodeDotter impl.
+func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+ return &dag.DotNode{
+ Name: name,
+ Attrs: map[string]string{
+ "label": n.Name(),
+ "shape": "box",
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
new file mode 100644
index 00000000..573570d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
@@ -0,0 +1,50 @@
+package terraform
+
+// NodeAbstractCountResource should be embedded instead of NodeAbstractResource
+// if the resource has a `count` value that needs to be expanded.
+//
+// The embedder should implement `DynamicExpand` to process the count.
+type NodeAbstractCountResource struct {
+ *NodeAbstractResource
+
+ // Validate, if true, will perform the validation for the count.
+ // This should only be turned on for the "validate" operation.
+ Validate bool
+}
+
+// GraphNodeEvalable
+func (n *NodeAbstractCountResource) EvalTree() EvalNode {
+ // We only check if the count is computed if we're not validating.
+ // If we're validating we allow computed counts since they just turn
+ // into more computed values.
+ var evalCountCheckComputed EvalNode
+ if !n.Validate {
+ evalCountCheckComputed = &EvalCountCheckComputed{Resource: n.Config}
+ }
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ // The EvalTree for a plannable resource primarily involves
+ // interpolating the count since it can contain variables
+ // we only just received access to.
+ //
+ // With the interpolated count, we can then DynamicExpand
+ // into the proper number of instances.
+ &EvalInterpolate{Config: n.Config.RawCount},
+
+ // Check if the count is computed
+ evalCountCheckComputed,
+
+ // If validation is enabled, perform the validation
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ return n.Validate, nil
+ },
+
+ Then: &EvalValidateCount{Resource: n.Config},
+ },
+
+ &EvalCountFixZeroOneBoundary{Resource: n.Config},
+ },
+ }
+}
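+
+// countEmbedExample is an editor's sketch (not upstream code) of the
+// embedding pattern described above: a concrete node embeds
+// NodeAbstractCountResource to inherit the count-handling EvalTree and
+// supplies its own DynamicExpand, as NodePlannableResource does.
+type countEmbedExample struct {
+ *NodeAbstractCountResource
+}
+
+func (n *countEmbedExample) DynamicExpand(ctx EvalContext) (*Graph, error) {
+ // By the time DynamicExpand runs, EvalTree has interpolated the count.
+ count, err := n.Config.Count()
+ if err != nil {
+ return nil, err
+ }
+ _ = count // expand into `count` concrete instance nodes here
+ return nil, nil
+}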
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
new file mode 100644
index 00000000..3599782b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
@@ -0,0 +1,357 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeApplyableResource represents a resource that is "applyable":
+// it is ready to be applied and is represented by a diff.
+type NodeApplyableResource struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeCreator
+func (n *NodeApplyableResource) CreateAddr() *ResourceAddress {
+ return n.NodeAbstractResource.Addr
+}
+
+// GraphNodeReferencer, overriding NodeAbstractResource
+func (n *NodeApplyableResource) References() []string {
+ result := n.NodeAbstractResource.References()
+
+ // The "apply" side of a resource generally also depends on the
+ // destruction of its dependencies as well. For example, if a LB
+ // references a set of VMs with ${vm.foo.*.id}, then we must wait for
+ // the destruction so we get the newly updated list of VMs.
+ //
+ // The exception here is CBD. When CBD is set, we don't do this since
+ // it would create a cycle. By not creating a cycle, we require two
+ // applies since the first apply the creation step will use the OLD
+ // values (pre-destroy) and the second step will update.
+ //
+ // This is how Terraform behaved with "legacy" graphs (TF <= 0.7.x).
+ // We mimic that behavior here now and can improve upon it in the future.
+ //
+ // This behavior is tested in graph_build_apply_test.go to test ordering.
+ cbd := n.Config != nil && n.Config.Lifecycle.CreateBeforeDestroy
+ if !cbd {
+ // The "apply" side of a resource always depends on the destruction
+ // of all its dependencies in addition to the creation.
+ for _, v := range result {
+ result = append(result, v+".destroy")
+ }
+ }
+
+ return result
+}
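+
+// Editor's note (worked example, not upstream text): if this resource
+// references "aws_instance.vm" and CBD is off, References returns
+// ["aws_instance.vm", "aws_instance.vm.destroy"], so the apply also waits
+// on the destroy node of its dependency.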
+
+// GraphNodeEvalable
+func (n *NodeApplyableResource) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ }
+
+ // Build the resource for eval
+ resource := &Resource{
+ Name: addr.Name,
+ Type: addr.Type,
+ CountIndex: addr.Index,
+ }
+ if resource.CountIndex < 0 {
+ resource.CountIndex = 0
+ }
+
+ // Determine the dependencies for the state.
+ stateDeps := n.StateReferences()
+
+ // Eval info is different depending on what kind of resource this is
+ switch n.Config.Mode {
+ case config.ManagedResourceMode:
+ return n.evalTreeManagedResource(
+ stateId, info, resource, stateDeps,
+ )
+ case config.DataResourceMode:
+ return n.evalTreeDataResource(
+ stateId, info, resource, stateDeps)
+ default:
+ panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
+ }
+}
+
+func (n *NodeApplyableResource) evalTreeDataResource(
+ stateId string, info *InstanceInfo,
+ resource *Resource, stateDeps []string) EvalNode {
+ var provider ResourceProvider
+ var config *ResourceConfig
+ var diff *InstanceDiff
+ var state *InstanceState
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ // Build the instance info
+ &EvalInstanceInfo{
+ Info: info,
+ },
+
+ // Get the saved diff for apply
+ &EvalReadDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+
+ // Stop here if we don't actually have a diff
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if diff == nil {
+ return true, EvalEarlyExitError{}
+ }
+
+ if diff.GetAttributesLen() == 0 {
+ return true, EvalEarlyExitError{}
+ }
+
+ return true, nil
+ },
+ Then: EvalNoop{},
+ },
+
+ // We need to re-interpolate the config here, rather than
+ // just using the diff's values directly, because we've
+ // potentially learned more variable values during the
+ // apply pass that weren't known when the diff was produced.
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &config,
+ },
+
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+
+ // Make a new diff with our newly-interpolated config.
+ &EvalReadDataDiff{
+ Info: info,
+ Config: &config,
+ Previous: &diff,
+ Provider: &provider,
+ Output: &diff,
+ },
+
+ &EvalReadDataApply{
+ Info: info,
+ Diff: &diff,
+ Provider: &provider,
+ Output: &state,
+ },
+
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: stateDeps,
+ State: &state,
+ },
+
+ // Clear the diff now that we've applied it, so
+ // later nodes won't see a diff that's now a no-op.
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: nil,
+ },
+
+ &EvalUpdateStateHook{},
+ },
+ }
+}
+
+func (n *NodeApplyableResource) evalTreeManagedResource(
+ stateId string, info *InstanceInfo,
+ resource *Resource, stateDeps []string) EvalNode {
+ // Declare a bunch of variables that are used for state during
+ // evaluation. Most of these are written to by address below.
+ var provider ResourceProvider
+ var diff, diffApply *InstanceDiff
+ var state *InstanceState
+ var resourceConfig *ResourceConfig
+ var err error
+ var createNew bool
+ var createBeforeDestroyEnabled bool
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ // Build the instance info
+ &EvalInstanceInfo{
+ Info: info,
+ },
+
+ // Get the saved diff for apply
+ &EvalReadDiff{
+ Name: stateId,
+ Diff: &diffApply,
+ },
+
+ // We don't want to do any destroys
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if diffApply == nil {
+ return true, EvalEarlyExitError{}
+ }
+
+ if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 {
+ return true, EvalEarlyExitError{}
+ }
+
+ diffApply.SetDestroy(false)
+ return true, nil
+ },
+ Then: EvalNoop{},
+ },
+
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ destroy := false
+ if diffApply != nil {
+ destroy = diffApply.GetDestroy() || diffApply.RequiresNew()
+ }
+
+ createBeforeDestroyEnabled =
+ n.Config.Lifecycle.CreateBeforeDestroy &&
+ destroy
+
+ return createBeforeDestroyEnabled, nil
+ },
+ Then: &EvalDeposeState{
+ Name: stateId,
+ },
+ },
+
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &resourceConfig,
+ },
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ // Re-run validation to catch any errors we missed, e.g. type
+ // mismatches on computed values.
+ &EvalValidateResource{
+ Provider: &provider,
+ Config: &resourceConfig,
+ ResourceName: n.Config.Name,
+ ResourceType: n.Config.Type,
+ ResourceMode: n.Config.Mode,
+ IgnoreWarnings: true,
+ },
+ &EvalDiff{
+ Info: info,
+ Config: &resourceConfig,
+ Resource: n.Config,
+ Provider: &provider,
+ Diff: &diffApply,
+ State: &state,
+ OutputDiff: &diffApply,
+ },
+
+ // Get the saved diff
+ &EvalReadDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+
+ // Compare the diffs
+ &EvalCompareDiff{
+ Info: info,
+ One: &diff,
+ Two: &diffApply,
+ },
+
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ // Call pre-apply hook
+ &EvalApplyPre{
+ Info: info,
+ State: &state,
+ Diff: &diffApply,
+ },
+ &EvalApply{
+ Info: info,
+ State: &state,
+ Diff: &diffApply,
+ Provider: &provider,
+ Output: &state,
+ Error: &err,
+ CreateNew: &createNew,
+ },
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: stateDeps,
+ State: &state,
+ },
+ &EvalApplyProvisioners{
+ Info: info,
+ State: &state,
+ Resource: n.Config,
+ InterpResource: resource,
+ CreateNew: &createNew,
+ Error: &err,
+ When: config.ProvisionerWhenCreate,
+ },
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ return createBeforeDestroyEnabled && err != nil, nil
+ },
+ Then: &EvalUndeposeState{
+ Name: stateId,
+ State: &state,
+ },
+ Else: &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: stateDeps,
+ State: &state,
+ },
+ },
+
+ // We clear the diff out here so that future nodes
+ // don't see a diff that is already complete. There
+ // is no longer a diff!
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: nil,
+ },
+
+ &EvalApplyPost{
+ Info: info,
+ State: &state,
+ Error: &err,
+ },
+ &EvalUpdateStateHook{},
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
new file mode 100644
index 00000000..c2efd2c3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
@@ -0,0 +1,288 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeDestroyResource represents a resource that is to be destroyed.
+type NodeDestroyResource struct {
+ *NodeAbstractResource
+}
+
+func (n *NodeDestroyResource) Name() string {
+ return n.NodeAbstractResource.Name() + " (destroy)"
+}
+
+// GraphNodeDestroyer
+func (n *NodeDestroyResource) DestroyAddr() *ResourceAddress {
+ return n.Addr
+}
+
+// GraphNodeDestroyerCBD
+func (n *NodeDestroyResource) CreateBeforeDestroy() bool {
+ // If we have no config, we just assume no
+ if n.Config == nil {
+ return false
+ }
+
+ return n.Config.Lifecycle.CreateBeforeDestroy
+}
+
+// GraphNodeDestroyerCBD
+func (n *NodeDestroyResource) ModifyCreateBeforeDestroy(v bool) error {
+ // If we have no config, do nothing since it won't affect the
+ // create step anyways.
+ if n.Config == nil {
+ return nil
+ }
+
+ // Set CBD to true
+ n.Config.Lifecycle.CreateBeforeDestroy = true
+
+ return nil
+}
+
+// GraphNodeReferenceable, overriding NodeAbstractResource
+func (n *NodeDestroyResource) ReferenceableName() []string {
+ // We modify our referenceable name to have the suffix of ".destroy"
+ // since depending on the creation side doesn't necessarily mean
+ // depending on destruction.
+ suffix := ".destroy"
+
+ // If we're CBD, we also append "-cbd". This is because CBD will setup
+ // its own edges (in CBDEdgeTransformer). Depending on the "destroy"
+ // side generally doesn't mean depending on CBD as well. See GH-11349
+ if n.CreateBeforeDestroy() {
+ suffix += "-cbd"
+ }
+
+ result := n.NodeAbstractResource.ReferenceableName()
+ for i, v := range result {
+ result[i] = v + suffix
+ }
+
+ return result
+}
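+
+// Editor's note (worked example, not upstream text): a resource normally
+// referenceable as "aws_instance.foo" is referenceable here as
+// "aws_instance.foo.destroy", or "aws_instance.foo.destroy-cbd" when
+// create_before_destroy is set.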
+
+// GraphNodeReferencer, overriding NodeAbstractResource
+func (n *NodeDestroyResource) References() []string {
+ // If we have a config, then we need to include destroy-time dependencies
+ if c := n.Config; c != nil {
+ var result []string
+ for _, p := range c.Provisioners {
+ // We include conn info and config for destroy time provisioners
+ // as dependencies that we have.
+ if p.When == config.ProvisionerWhenDestroy {
+ result = append(result, ReferencesFromConfig(p.ConnInfo)...)
+ result = append(result, ReferencesFromConfig(p.RawConfig)...)
+ }
+ }
+
+ return result
+ }
+
+ return nil
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+ // If we have no address, there is nothing to expand
+ if n.Addr == nil {
+ return nil, nil
+ }
+
+ state, lock := ctx.State()
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Start creating the steps
+ steps := make([]GraphTransformer, 0, 5)
+
+ // We want deposed resources in the state to be destroyed
+ steps = append(steps, &DeposedTransformer{
+ State: state,
+ View: n.Addr.stateId(),
+ })
+
+ // Target
+ steps = append(steps, &TargetsTransformer{
+ ParsedTargets: n.Targets,
+ })
+
+ // Always end with the root being added
+ steps = append(steps, &RootTransformer{})
+
+ // Build the graph
+ b := &BasicGraphBuilder{
+ Steps: steps,
+ Name: "NodeResourceDestroy",
+ }
+ return b.Build(ctx.Path())
+}
+
+// GraphNodeEvalable
+func (n *NodeDestroyResource) EvalTree() EvalNode {
+ // stateId is the ID to put into the state
+ stateId := n.Addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: n.Addr.Type,
+ uniqueExtra: "destroy",
+ }
+
+ // Build the resource for eval
+ addr := n.Addr
+ resource := &Resource{
+ Name: addr.Name,
+ Type: addr.Type,
+ CountIndex: addr.Index,
+ }
+ if resource.CountIndex < 0 {
+ resource.CountIndex = 0
+ }
+
+ // Get our state
+ rs := n.ResourceState
+ if rs == nil {
+ rs = &ResourceState{}
+ }
+
+ var diffApply *InstanceDiff
+ var provider ResourceProvider
+ var state *InstanceState
+ var err error
+ return &EvalOpFilter{
+ Ops: []walkOperation{walkApply, walkDestroy},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ // Get the saved diff for apply
+ &EvalReadDiff{
+ Name: stateId,
+ Diff: &diffApply,
+ },
+
+ // Filter the diff so we only get the destroy
+ &EvalFilterDiff{
+ Diff: &diffApply,
+ Output: &diffApply,
+ Destroy: true,
+ },
+
+ // If the diff has no destroy operation, exit early
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if diffApply != nil && diffApply.GetDestroy() {
+ return true, nil
+ }
+
+ return true, EvalEarlyExitError{}
+ },
+ Then: EvalNoop{},
+ },
+
+ // Load the instance info so we have the module path set
+ &EvalInstanceInfo{Info: info},
+
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ &EvalRequireState{
+ State: &state,
+ },
+
+ // Call pre-apply hook
+ &EvalApplyPre{
+ Info: info,
+ State: &state,
+ Diff: &diffApply,
+ },
+
+ // Run destroy provisioners if not tainted
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if state != nil && state.Tainted {
+ return false, nil
+ }
+
+ return true, nil
+ },
+
+ Then: &EvalApplyProvisioners{
+ Info: info,
+ State: &state,
+ Resource: n.Config,
+ InterpResource: resource,
+ Error: &err,
+ When: config.ProvisionerWhenDestroy,
+ },
+ },
+
+ // If we have a provisioning error, then we just call
+ // the post-apply hook now.
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ return err != nil, nil
+ },
+
+ Then: &EvalApplyPost{
+ Info: info,
+ State: &state,
+ Error: &err,
+ },
+ },
+
+ // Make sure we handle data sources properly.
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if n.Addr == nil {
+ return false, fmt.Errorf("nil address")
+ }
+
+ if n.Addr.Mode == config.DataResourceMode {
+ return true, nil
+ }
+
+ return false, nil
+ },
+
+ Then: &EvalReadDataApply{
+ Info: info,
+ Diff: &diffApply,
+ Provider: &provider,
+ Output: &state,
+ },
+ Else: &EvalApply{
+ Info: info,
+ State: &state,
+ Diff: &diffApply,
+ Provider: &provider,
+ Output: &state,
+ Error: &err,
+ },
+ },
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Addr.Type,
+ Provider: rs.Provider,
+ Dependencies: rs.Dependencies,
+ State: &state,
+ },
+ &EvalApplyPost{
+ Info: info,
+ State: &state,
+ Error: &err,
+ },
+ &EvalUpdateStateHook{},
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
new file mode 100644
index 00000000..52bbf88a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
@@ -0,0 +1,83 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// NodePlannableResource represents a resource that is "plannable":
+// it is ready to be planned in order to create a diff.
+type NodePlannableResource struct {
+ *NodeAbstractCountResource
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+ // Grab the state which we read
+ state, lock := ctx.State()
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Expand the resource count which must be available by now from EvalTree
+ count, err := n.Config.Count()
+ if err != nil {
+ return nil, err
+ }
+
+ // The concrete resource factory we'll use
+ concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+ // Add the config and state since we don't do that via transforms
+ a.Config = n.Config
+
+ return &NodePlannableResourceInstance{
+ NodeAbstractResource: a,
+ }
+ }
+
+ // The concrete resource factory we'll use for orphans
+ concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex {
+ // Add the config and state since we don't do that via transforms
+ a.Config = n.Config
+
+ return &NodePlannableResourceOrphan{
+ NodeAbstractResource: a,
+ }
+ }
+
+ // Start creating the steps
+ steps := []GraphTransformer{
+ // Expand the count.
+ &ResourceCountTransformer{
+ Concrete: concreteResource,
+ Count: count,
+ Addr: n.ResourceAddr(),
+ },
+
+ // Add the count orphans
+ &OrphanResourceCountTransformer{
+ Concrete: concreteResourceOrphan,
+ Count: count,
+ Addr: n.ResourceAddr(),
+ State: state,
+ },
+
+ // Attach the state
+ &AttachStateTransformer{State: state},
+
+ // Targeting
+ &TargetsTransformer{ParsedTargets: n.Targets},
+
+ // Connect references so ordering is correct
+ &ReferenceTransformer{},
+
+ // Make sure there is a single root
+ &RootTransformer{},
+ }
+
+ // Build the graph
+ b := &BasicGraphBuilder{
+ Steps: steps,
+ Validate: true,
+ Name: "NodePlannableResource",
+ }
+ return b.Build(ctx.Path())
+}
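+
+// Editor's note (assumption, not upstream text): OrphanResourceCountTransformer
+// is expected to add NodePlannableResourceOrphan nodes for state entries at
+// indexes no longer covered by the count, so lowering the count plans
+// destroys for the excess instances.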
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
new file mode 100644
index 00000000..9b02362b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
@@ -0,0 +1,53 @@
+package terraform
+
+// NodePlanDestroyableResource represents a resource that is "applyable":
+// it is ready to be applied and is represented by a diff.
+type NodePlanDestroyableResource struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeDestroyer
+func (n *NodePlanDestroyableResource) DestroyAddr() *ResourceAddress {
+ return n.Addr
+}
+
+// GraphNodeEvalable
+func (n *NodePlanDestroyableResource) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ }
+
+ // Declare a bunch of variables that are used for state during
+ // evaluation. Most of these are written to by address below.
+ var diff *InstanceDiff
+ var state *InstanceState
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ &EvalDiffDestroy{
+ Info: info,
+ State: &state,
+ Output: &diff,
+ },
+ &EvalCheckPreventDestroy{
+ Resource: n.Config,
+ Diff: &diff,
+ },
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
new file mode 100644
index 00000000..b5295690
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
@@ -0,0 +1,190 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodePlannableResourceInstance represents a _single_ resource
+// instance that is plannable. This means this represents a single
+// count index, for example.
+type NodePlannableResourceInstance struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodePlannableResourceInstance) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ ModulePath: normalizeModulePath(addr.Path),
+ }
+
+ // Build the resource for eval
+ resource := &Resource{
+ Name: addr.Name,
+ Type: addr.Type,
+ CountIndex: addr.Index,
+ }
+ if resource.CountIndex < 0 {
+ resource.CountIndex = 0
+ }
+
+ // Determine the dependencies for the state.
+ stateDeps := n.StateReferences()
+
+ // Eval info is different depending on what kind of resource this is
+ switch n.Config.Mode {
+ case config.ManagedResourceMode:
+ return n.evalTreeManagedResource(
+ stateId, info, resource, stateDeps,
+ )
+ case config.DataResourceMode:
+ return n.evalTreeDataResource(
+ stateId, info, resource, stateDeps)
+ default:
+ panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
+ }
+}
+
+func (n *NodePlannableResourceInstance) evalTreeDataResource(
+ stateId string, info *InstanceInfo,
+ resource *Resource, stateDeps []string) EvalNode {
+ var provider ResourceProvider
+ var config *ResourceConfig
+ var diff *InstanceDiff
+ var state *InstanceState
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+
+ // We need to re-interpolate the config here because some
+ // of the attributes may have become computed during
+ // earlier planning, due to other resources having
+ // "requires new resource" diffs.
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &config,
+ },
+
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ computed := config.ComputedKeys != nil && len(config.ComputedKeys) > 0
+
+ // If the configuration is complete and we
+ // already have a state then we don't need to
+ // do any further work during apply, because we
+ // already populated the state during refresh.
+ if !computed && state != nil {
+ return true, EvalEarlyExitError{}
+ }
+
+ return true, nil
+ },
+ Then: EvalNoop{},
+ },
+
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+
+ &EvalReadDataDiff{
+ Info: info,
+ Config: &config,
+ Provider: &provider,
+ Output: &diff,
+ OutputState: &state,
+ },
+
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: stateDeps,
+ State: &state,
+ },
+
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+ },
+ }
+}
+
+func (n *NodePlannableResourceInstance) evalTreeManagedResource(
+ stateId string, info *InstanceInfo,
+ resource *Resource, stateDeps []string) EvalNode {
+ // Declare a bunch of variables that are used for state during
+ // evaluation. Most of these are written to by address below.
+ var provider ResourceProvider
+ var diff *InstanceDiff
+ var state *InstanceState
+ var resourceConfig *ResourceConfig
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &resourceConfig,
+ },
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ // Re-run validation to catch any errors we missed, e.g. type
+ // mismatches on computed values.
+ &EvalValidateResource{
+ Provider: &provider,
+ Config: &resourceConfig,
+ ResourceName: n.Config.Name,
+ ResourceType: n.Config.Type,
+ ResourceMode: n.Config.Mode,
+ IgnoreWarnings: true,
+ },
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ &EvalDiff{
+ Name: stateId,
+ Info: info,
+ Config: &resourceConfig,
+ Resource: n.Config,
+ Provider: &provider,
+ State: &state,
+ OutputDiff: &diff,
+ OutputState: &state,
+ },
+ &EvalCheckPreventDestroy{
+ Resource: n.Config,
+ Diff: &diff,
+ },
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: stateDeps,
+ State: &state,
+ },
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
new file mode 100644
index 00000000..73d6e41f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
@@ -0,0 +1,54 @@
+package terraform
+
+// NodePlannableResourceOrphan represents a resource that is "applyable":
+// it is ready to be applied and is represented by a diff.
+type NodePlannableResourceOrphan struct {
+ *NodeAbstractResource
+}
+
+func (n *NodePlannableResourceOrphan) Name() string {
+ return n.NodeAbstractResource.Name() + " (orphan)"
+}
+
+// GraphNodeEvalable
+func (n *NodePlannableResourceOrphan) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ ModulePath: normalizeModulePath(addr.Path),
+ }
+
+ // Declare a bunch of variables that are used for state during
+ // evaluation. Most of these are written to by address below.
+ var diff *InstanceDiff
+ var state *InstanceState
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ &EvalDiffDestroy{
+ Info: info,
+ State: &state,
+ Output: &diff,
+ },
+ &EvalCheckPreventDestroy{
+ Resource: n.Config,
+ ResourceId: stateId,
+ Diff: &diff,
+ },
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
new file mode 100644
index 00000000..3a44926c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
@@ -0,0 +1,100 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeRefreshableResource represents a resource that is "applyable":
+// it is ready to be applied and is represented by a diff.
+type NodeRefreshableResource struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeDestroyer
+func (n *NodeRefreshableResource) DestroyAddr() *ResourceAddress {
+ return n.Addr
+}
+
+// GraphNodeEvalable
+func (n *NodeRefreshableResource) EvalTree() EvalNode {
+ // Eval info is different depending on what kind of resource this is
+ switch mode := n.Addr.Mode; mode {
+ case config.ManagedResourceMode:
+ return n.evalTreeManagedResource()
+
+ case config.DataResourceMode:
+ // Get the data source node. If we don't have a configuration
+ // then it is an orphan so we destroy it (remove it from the state).
+ var dn GraphNodeEvalable
+ if n.Config != nil {
+ dn = &NodeRefreshableDataResourceInstance{
+ NodeAbstractResource: n.NodeAbstractResource,
+ }
+ } else {
+ dn = &NodeDestroyableDataResource{
+ NodeAbstractResource: n.NodeAbstractResource,
+ }
+ }
+
+ return dn.EvalTree()
+ default:
+ panic(fmt.Errorf("unsupported resource mode %s", mode))
+ }
+}
+
+func (n *NodeRefreshableResource) evalTreeManagedResource() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ }
+
+ // Declare a bunch of variables that are used for state during
+ // evaluation. Most of these are written to by address below.
+ var provider ResourceProvider
+ var state *InstanceState
+
+ // A missing resource state here was a bug seen during initial
+ // development. All known cases were fixed and tested, but assert
+ // here as a sanity check.
+ if n.ResourceState == nil {
+ err := fmt.Errorf(
+ "No resource state attached for addr: %s\n\n"+
+ "This is a bug. Please report this to Terraform with your configuration\n"+
+ "and state attached. Please be careful to scrub any sensitive information.",
+ addr)
+ return &EvalReturnError{Error: &err}
+ }
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ &EvalRefresh{
+ Info: info,
+ Provider: &provider,
+ State: &state,
+ Output: &state,
+ },
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.ResourceState.Type,
+ Provider: n.ResourceState.Provider,
+ Dependencies: n.ResourceState.Dependencies,
+ State: &state,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
new file mode 100644
index 00000000..f528f24b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
@@ -0,0 +1,158 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// NodeValidatableResource represents a resource that is used for validation
+// only.
+type NodeValidatableResource struct {
+ *NodeAbstractCountResource
+}
+
+// GraphNodeEvalable
+func (n *NodeValidatableResource) EvalTree() EvalNode {
+ // Ensure we're validating
+ c := n.NodeAbstractCountResource
+ c.Validate = true
+ return c.EvalTree()
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+ // Grab the state which we read
+ state, lock := ctx.State()
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Expand the resource count which must be available by now from EvalTree
+ count := 1
+ if n.Config.RawCount.Value() != unknownValue() {
+ var err error
+ count, err = n.Config.Count()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // The concrete resource factory we'll use
+ concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+ // Add the config and state since we don't do that via transforms
+ a.Config = n.Config
+
+ return &NodeValidatableResourceInstance{
+ NodeAbstractResource: a,
+ }
+ }
+
+ // Start creating the steps
+ steps := []GraphTransformer{
+ // Expand the count.
+ &ResourceCountTransformer{
+ Concrete: concreteResource,
+ Count: count,
+ Addr: n.ResourceAddr(),
+ },
+
+ // Attach the state
+ &AttachStateTransformer{State: state},
+
+ // Targeting
+ &TargetsTransformer{ParsedTargets: n.Targets},
+
+ // Connect references so ordering is correct
+ &ReferenceTransformer{},
+
+ // Make sure there is a single root
+ &RootTransformer{},
+ }
+
+ // Build the graph
+ b := &BasicGraphBuilder{
+ Steps: steps,
+ Validate: true,
+ Name: "NodeValidatableResource",
+ }
+
+ return b.Build(ctx.Path())
+}
+
+// NodeValidatableResourceInstance represents a _single_ resource instance
+// to validate.
+type NodeValidatableResourceInstance struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodeValidatableResourceInstance) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // Build the resource for eval
+ resource := &Resource{
+ Name: addr.Name,
+ Type: addr.Type,
+ CountIndex: addr.Index,
+ }
+ if resource.CountIndex < 0 {
+ resource.CountIndex = 0
+ }
+
+ // Declare a bunch of variables that are used for state during
+ // evaluation. Most of these are written to by address below.
+ var config *ResourceConfig
+ var provider ResourceProvider
+
+ seq := &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalValidateResourceSelfRef{
+ Addr: &addr,
+ Config: &n.Config.RawConfig,
+ },
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &config,
+ },
+ &EvalValidateResource{
+ Provider: &provider,
+ Config: &config,
+ ResourceName: n.Config.Name,
+ ResourceType: n.Config.Type,
+ ResourceMode: n.Config.Mode,
+ },
+ },
+ }
+
+ // Validate all the provisioners
+ for _, p := range n.Config.Provisioners {
+ var provisioner ResourceProvisioner
+ var connConfig *ResourceConfig
+ seq.Nodes = append(
+ seq.Nodes,
+ &EvalGetProvisioner{
+ Name: p.Type,
+ Output: &provisioner,
+ },
+ &EvalInterpolate{
+ Config: p.RawConfig.Copy(),
+ Resource: resource,
+ Output: &config,
+ },
+ &EvalInterpolate{
+ Config: p.ConnInfo.Copy(),
+ Resource: resource,
+ Output: &connConfig,
+ },
+ &EvalValidateProvisioner{
+ Provisioner: &provisioner,
+ Config: &config,
+ ConnConfig: &connConfig,
+ },
+ )
+ }
+
+ return seq
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
new file mode 100644
index 00000000..cb61a4e3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
@@ -0,0 +1,22 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeRootVariable represents a root variable input.
+type NodeRootVariable struct {
+ Config *config.Variable
+}
+
+func (n *NodeRootVariable) Name() string {
+ result := fmt.Sprintf("var.%s", n.Config.Name)
+ return result
+}
+
+// GraphNodeReferenceable
+func (n *NodeRootVariable) ReferenceableName() []string {
+ return []string{n.Name()}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go
new file mode 100644
index 00000000..ca99685a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/path.go
@@ -0,0 +1,24 @@
+package terraform
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+)
+
+// PathCacheKey returns a cache key for a module path.
+//
+// TODO: test
+func PathCacheKey(path []string) string {
+ // There is probably a better way to do this, but this works for now.
+ // We create an MD5 hash of the MD5 hashes of all the path elements.
+ // This makes the key sensitive to both the elements and their order.
+ hash := md5.New()
+ for _, p := range path {
+ single := md5.Sum([]byte(p))
+ if _, err := hash.Write(single[:]); err != nil {
+ panic(err)
+ }
+ }
+
+ return hex.EncodeToString(hash.Sum(nil))
+}
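+
+// Example (illustrative sketch, not part of the original source): the key
+// is deterministic and sensitive to ordering.
+//
+//	k1 := PathCacheKey([]string{"root", "network"})
+//	k2 := PathCacheKey([]string{"network", "root"})
+//	// k1 != k2, while repeated calls with the same path return the same key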
diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go
new file mode 100644
index 00000000..ea088450
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/plan.go
@@ -0,0 +1,153 @@
+package terraform
+
+import (
+ "bytes"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/hashicorp/terraform/config/module"
+)
+
+func init() {
+ gob.Register(make([]interface{}, 0))
+ gob.Register(make([]map[string]interface{}, 0))
+ gob.Register(make(map[string]interface{}))
+ gob.Register(make(map[string]string))
+}
+
+// Plan represents a single Terraform execution plan, which contains
+// all the information necessary to make an infrastructure change.
+//
+// A plan has to contain basically the entire state of the world
+// necessary to make a change: the state, diff, config, backend config, etc.
+// This is so that it can run alone without any other data.
+type Plan struct {
+ Diff *Diff
+ Module *module.Tree
+ State *State
+ Vars map[string]interface{}
+ Targets []string
+
+ // Backend is the backend that this plan should use and store data with.
+ Backend *BackendState
+
+ once sync.Once
+}
+
+// Context returns a Context with the data encapsulated in this plan.
+//
+// The following fields in opts are overridden by the plan: Module, Diff,
+// State, Targets, Variables.
+func (p *Plan) Context(opts *ContextOpts) (*Context, error) {
+ opts.Diff = p.Diff
+ opts.Module = p.Module
+ opts.State = p.State
+ opts.Targets = p.Targets
+
+ opts.Variables = make(map[string]interface{})
+ for k, v := range p.Vars {
+ opts.Variables[k] = v
+ }
+
+ return NewContext(opts)
+}
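+
+// Example (illustrative sketch, not part of the original source): a decoded
+// plan can be turned back into a runnable Context, with its Module, Diff,
+// State, Targets, and Variables carried over from the plan.
+//
+//	ctx, err := plan.Context(&ContextOpts{})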
+
+func (p *Plan) String() string {
+ buf := new(bytes.Buffer)
+ buf.WriteString("DIFF:\n\n")
+ buf.WriteString(p.Diff.String())
+ buf.WriteString("\n\nSTATE:\n\n")
+ buf.WriteString(p.State.String())
+ return buf.String()
+}
+
+func (p *Plan) init() {
+ p.once.Do(func() {
+ if p.Diff == nil {
+ p.Diff = new(Diff)
+ p.Diff.init()
+ }
+
+ if p.State == nil {
+ p.State = new(State)
+ p.State.init()
+ }
+
+ if p.Vars == nil {
+ p.Vars = make(map[string]interface{})
+ }
+ })
+}
+
+// The magic string and format version byte are prefixed to the plan file
+// so that we have the ability to change the file format in the future if
+// we want to, for any reason.
+const planFormatMagic = "tfplan"
+const planFormatVersion byte = 1
+
+// ReadPlan reads a plan structure out of a reader in the format that
+// was written by WritePlan.
+func ReadPlan(src io.Reader) (*Plan, error) {
+ var result *Plan
+
+ // Verify the magic bytes. io.ReadFull accumulates partial reads for us;
+ // a bare src.Read loop would have to track the offset itself.
+ magic := make([]byte, len(planFormatMagic))
+ if _, err := io.ReadFull(src, magic); err != nil {
+ return nil, fmt.Errorf("error while reading magic bytes: %s", err)
+ }
+ if string(magic) != planFormatMagic {
+ return nil, fmt.Errorf("not a valid plan file")
+ }
+
+ // Verify the version is something we can read
+ var formatByte [1]byte
+ n, err := src.Read(formatByte[:])
+ if err != nil {
+ return nil, err
+ }
+ if n != len(formatByte) {
+ return nil, errors.New("failed to read plan version byte")
+ }
+
+ if formatByte[0] != planFormatVersion {
+ return nil, fmt.Errorf("unknown plan file version: %d", formatByte[0])
+ }
+
+ dec := gob.NewDecoder(src)
+ if err := dec.Decode(&result); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// WritePlan writes a plan to the given writer in a binary format.
+func WritePlan(d *Plan, dst io.Writer) error {
+ // Write the magic bytes so we can determine the file format later
+ n, err := dst.Write([]byte(planFormatMagic))
+ if err != nil {
+ return err
+ }
+ if n != len(planFormatMagic) {
+ return errors.New("failed to write plan format magic bytes")
+ }
+
+ // Write a version byte so we can iterate on version at some point
+ n, err = dst.Write([]byte{planFormatVersion})
+ if err != nil {
+ return err
+ }
+ if n != 1 {
+ return errors.New("failed to write plan version byte")
+ }
+
+ return gob.NewEncoder(dst).Encode(d)
+}
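+
+// Example (illustrative sketch, not part of the original source): WritePlan
+// and ReadPlan round-trip through the magic/version/gob format, so an
+// in-memory buffer is enough to exercise both.
+//
+//	var buf bytes.Buffer
+//	if err := WritePlan(p, &buf); err != nil {
+//		// handle err
+//	}
+//	restored, err := ReadPlan(&buf)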
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go
new file mode 100644
index 00000000..0acf0beb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go
@@ -0,0 +1,360 @@
+package terraform
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/mitchellh/copystructure"
+ "github.com/mitchellh/reflectwalk"
+)
+
+// ResourceProvisionerConfig is used to pair a provisioner
+// with its provided configuration. This allows us to use singleton
+// instances of each ResourceProvisioner and to keep the relevant
+// configuration instead of instantiating a new Provisioner for each
+// resource.
+type ResourceProvisionerConfig struct {
+ Type string
+ Provisioner ResourceProvisioner
+ Config *ResourceConfig
+ RawConfig *config.RawConfig
+ ConnInfo *config.RawConfig
+}
+
+// Resource encapsulates a resource, its configuration, its provider,
+// its current state, and potentially a desired diff from the state it
+// wants to reach.
+type Resource struct {
+ // These are all used by the new EvalNode stuff.
+ Name string
+ Type string
+ CountIndex int
+
+ // These aren't really used anywhere anymore, but we keep them around
+ // since we haven't done a proper cleanup yet.
+ Id string
+ Info *InstanceInfo
+ Config *ResourceConfig
+ Dependencies []string
+ Diff *InstanceDiff
+ Provider ResourceProvider
+ State *InstanceState
+ Provisioners []*ResourceProvisionerConfig
+ Flags ResourceFlag
+}
+
+// ResourceFlag specifies what kind of instance we're working with, whether
+// it's a primary instance, a tainted instance, or an orphan.
+type ResourceFlag byte
+
+// InstanceInfo is used to hold information about the instance and/or
+// resource being modified.
+type InstanceInfo struct {
+ // Id is a unique name to represent this instance. This is not related
+ // to InstanceState.ID in any way.
+ Id string
+
+ // ModulePath is the complete path of the module containing this
+ // instance.
+ ModulePath []string
+
+ // Type is the resource type of this instance
+ Type string
+
+ // uniqueExtra is an internal field that can be populated to supply
+ // extra metadata that is used to identify a unique instance in
+ // the graph walk. This will be appended to HumanID when uniqueId
+ // is called.
+ uniqueExtra string
+}
+
+// HumanId is a unique Id that is human-friendly and useful for UI elements.
+func (i *InstanceInfo) HumanId() string {
+ if i == nil {
+ return "<nil>"
+ }
+
+ if len(i.ModulePath) <= 1 {
+ return i.Id
+ }
+
+ return fmt.Sprintf(
+ "module.%s.%s",
+ strings.Join(i.ModulePath[1:], "."),
+ i.Id)
+}
+
+func (i *InstanceInfo) uniqueId() string {
+ prefix := i.HumanId()
+ if v := i.uniqueExtra; v != "" {
+ prefix += " " + v
+ }
+
+ return prefix
+}
+
+// ResourceConfig holds the configuration given for a resource. This is
+// done instead of a raw `map[string]interface{}` type so that rich
+// methods can be added to it to make dealing with it easier.
+type ResourceConfig struct {
+ ComputedKeys []string
+ Raw map[string]interface{}
+ Config map[string]interface{}
+
+ raw *config.RawConfig
+}
+
+// NewResourceConfig creates a new ResourceConfig from a config.RawConfig.
+func NewResourceConfig(c *config.RawConfig) *ResourceConfig {
+ result := &ResourceConfig{raw: c}
+ result.interpolateForce()
+ return result
+}
+
+// DeepCopy performs a deep copy of the configuration. This makes it safe
+// to modify any of the structures that are part of the resource config without
+// affecting the original configuration.
+func (c *ResourceConfig) DeepCopy() *ResourceConfig {
+ // DeepCopying a nil should return a nil to avoid panics
+ if c == nil {
+ return nil
+ }
+
+ // Copy; this copies all the exported attributes.
+ copied, err := copystructure.Config{Lock: true}.Copy(c)
+ if err != nil {
+ panic(err)
+ }
+
+ // Force the type
+ result := copied.(*ResourceConfig)
+
+ // For the raw configuration, we can just use its own copy method
+ result.raw = c.raw.Copy()
+
+ return result
+}
+
+// Equal checks the equality of two resource configs.
+func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool {
+ // If either are nil, then they're only equal if they're both nil
+ if c == nil || c2 == nil {
+ return c == c2
+ }
+
+ // Sort the computed keys so they're deterministic
+ sort.Strings(c.ComputedKeys)
+ sort.Strings(c2.ComputedKeys)
+
+ // Two resource configs are equal if their exported properties are equal.
+ // We don't compare "raw" because it is never used again after
+ // initialization and for all intents and purposes they are equal
+ // if the exported properties are equal.
+ check := [][2]interface{}{
+ {c.ComputedKeys, c2.ComputedKeys},
+ {c.Raw, c2.Raw},
+ {c.Config, c2.Config},
+ }
+ for _, pair := range check {
+ if !reflect.DeepEqual(pair[0], pair[1]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// CheckSet checks that the given list of configuration keys is
+// properly set. If not, errors are returned for each unset key.
+//
+// This is useful to call from the Validate method of a ResourceProvider.
+func (c *ResourceConfig) CheckSet(keys []string) []error {
+ var errs []error
+
+ for _, k := range keys {
+ if !c.IsSet(k) {
+ errs = append(errs, fmt.Errorf("%s must be set", k))
+ }
+ }
+
+ return errs
+}
+
+// Get looks up a configuration value by key and returns the value.
+//
+// The second return value is true if the get was successful. Get will
+// return the raw value if the key is computed, so you should pair this
+// with IsComputed.
+func (c *ResourceConfig) Get(k string) (interface{}, bool) {
+ // We aim to get a value from the configuration. If it is computed,
+ // then we return the pure raw value.
+ source := c.Config
+ if c.IsComputed(k) {
+ source = c.Raw
+ }
+
+ return c.get(k, source)
+}
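+
+// Example (illustrative sketch, not part of the original source): keys are
+// dotted paths into the config, so for a config equivalent to
+// {"tags": {"Name": "web"}, "ports": [80, 443]}:
+//
+//	v, _ := c.Get("tags.Name") // "web", true
+//	n, _ := c.Get("ports.#")   // 2 (list length), true
+//	p, _ := c.Get("ports.0")   // 80, true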
+
+// GetRaw looks up a configuration value by key and returns the value,
+// from the raw, uninterpolated config.
+//
+// The second return value is true if the get was successful. Get will
+// not succeed if the value is being computed.
+func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) {
+ return c.get(k, c.Raw)
+}
+
+// IsComputed returns whether the given key is computed or not.
+func (c *ResourceConfig) IsComputed(k string) bool {
+ // Check the interpolated config to see whether we get a computed
+ // value out of it.
+ v, ok := c.get(k, c.Config)
+ if !ok {
+ return false
+ }
+
+ // If value is nil, then it isn't computed
+ if v == nil {
+ return false
+ }
+
+ // Test if the value contains an unknown value
+ var w unknownCheckWalker
+ if err := reflectwalk.Walk(v, &w); err != nil {
+ panic(err)
+ }
+
+ return w.Unknown
+}
+
+// IsSet checks if the key in the configuration is set. A key is set if
+// it has a value or the value is being computed (is unknown currently).
+//
+// This function should be used rather than checking the keys of the
+// raw configuration itself, since a key may be omitted from the raw
+// configuration if it is being computed.
+func (c *ResourceConfig) IsSet(k string) bool {
+ if c == nil {
+ return false
+ }
+
+ if c.IsComputed(k) {
+ return true
+ }
+
+ if _, ok := c.Get(k); ok {
+ return true
+ }
+
+ return false
+}
+
+func (c *ResourceConfig) get(
+ k string, raw map[string]interface{}) (interface{}, bool) {
+ parts := strings.Split(k, ".")
+ if len(parts) == 1 && parts[0] == "" {
+ parts = nil
+ }
+
+ var current interface{} = raw
+ var previous interface{} = nil
+ for i, part := range parts {
+ if current == nil {
+ return nil, false
+ }
+
+ cv := reflect.ValueOf(current)
+ switch cv.Kind() {
+ case reflect.Map:
+ previous = current
+ v := cv.MapIndex(reflect.ValueOf(part))
+ if !v.IsValid() {
+ if i > 0 && i != (len(parts)-1) {
+ tryKey := strings.Join(parts[i:], ".")
+ v := cv.MapIndex(reflect.ValueOf(tryKey))
+ if !v.IsValid() {
+ return nil, false
+ }
+
+ return v.Interface(), true
+ }
+
+ return nil, false
+ }
+
+ current = v.Interface()
+ case reflect.Slice:
+ previous = current
+
+ if part == "#" {
+ // If any value in a list is computed, this whole thing
+ // is computed and we can't read any part of it.
+ for i := 0; i < cv.Len(); i++ {
+ if v := cv.Index(i).Interface(); v == unknownValue() {
+ return v, true
+ }
+ }
+
+ current = cv.Len()
+ } else {
+ i, err := strconv.ParseInt(part, 0, 0)
+ if err != nil {
+ return nil, false
+ }
+ if i >= int64(cv.Len()) {
+ return nil, false
+ }
+ current = cv.Index(int(i)).Interface()
+ }
+ case reflect.String:
+ // This happens when map keys contain "." and have a common
+ // prefix, so they were split into path components above.
+ actualKey := strings.Join(parts[i-1:], ".")
+ if prevMap, ok := previous.(map[string]interface{}); ok {
+ v, ok := prevMap[actualKey]
+ return v, ok
+ }
+
+ return nil, false
+ default:
+ panic(fmt.Sprintf("Unknown kind: %s", cv.Kind()))
+ }
+ }
+
+ return current, true
+}
+
+// interpolateForce is a temporary thing. We want to get rid of interpolate
+// above and likewise this, but it can only be done after the f-ast-graph
+// refactor is complete.
+func (c *ResourceConfig) interpolateForce() {
+ if c.raw == nil {
+ var err error
+ c.raw, err = config.NewRawConfig(make(map[string]interface{}))
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ c.ComputedKeys = c.raw.UnknownKeys()
+ c.Raw = c.raw.RawMap()
+ c.Config = c.raw.Config()
+}
+
+// unknownCheckWalker is a reflectwalk walker that records whether any
+// primitive value it visits is the unknown (computed) sentinel value.
+type unknownCheckWalker struct {
+ Unknown bool
+}
+
+func (w *unknownCheckWalker) Primitive(v reflect.Value) error {
+ if v.Interface() == unknownValue() {
+ w.Unknown = true
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
new file mode 100644
index 00000000..a8a0c955
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
@@ -0,0 +1,301 @@
+package terraform
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// ResourceAddress is a way of identifying an individual resource (or,
+// eventually, a subset of resources) within the state. It is used for Targets.
+type ResourceAddress struct {
+ // Addresses a resource falling somewhere in the module path
+ // When specified alone, addresses all resources within a module path
+ Path []string
+
+ // Addresses a specific resource that occurs in a list
+ Index int
+
+ InstanceType InstanceType
+ InstanceTypeSet bool
+ Name string
+ Type string
+ Mode config.ResourceMode // significant only if InstanceTypeSet
+}
+
+// Copy returns a copy of this ResourceAddress
+func (r *ResourceAddress) Copy() *ResourceAddress {
+ if r == nil {
+ return nil
+ }
+
+ n := &ResourceAddress{
+ Path: make([]string, 0, len(r.Path)),
+ Index: r.Index,
+ InstanceType: r.InstanceType,
+ Name: r.Name,
+ Type: r.Type,
+ Mode: r.Mode,
+ }
+ n.Path = append(n.Path, r.Path...)
+ return n
+}
+
+// String outputs the address that parses into this address.
+func (r *ResourceAddress) String() string {
+ var result []string
+ for _, p := range r.Path {
+ result = append(result, "module", p)
+ }
+
+ switch r.Mode {
+ case config.ManagedResourceMode:
+ // nothing to do
+ case config.DataResourceMode:
+ result = append(result, "data")
+ default:
+ panic(fmt.Errorf("unsupported resource mode %s", r.Mode))
+ }
+
+ if r.Type != "" {
+ result = append(result, r.Type)
+ }
+
+ if r.Name != "" {
+ name := r.Name
+ if r.InstanceTypeSet {
+ switch r.InstanceType {
+ case TypePrimary:
+ name += ".primary"
+ case TypeDeposed:
+ name += ".deposed"
+ case TypeTainted:
+ name += ".tainted"
+ }
+ }
+
+ if r.Index >= 0 {
+ name += fmt.Sprintf("[%d]", r.Index)
+ }
+ result = append(result, name)
+ }
+
+ return strings.Join(result, ".")
+}
+
+// stateId returns the ID that this resource should be entered with
+// in the state. This is also used for diffs. In the future, we'd like to
+// move away from this string field, so we don't export this.
+func (r *ResourceAddress) stateId() string {
+ result := fmt.Sprintf("%s.%s", r.Type, r.Name)
+ switch r.Mode {
+ case config.ManagedResourceMode:
+ // Done
+ case config.DataResourceMode:
+ result = fmt.Sprintf("data.%s", result)
+ default:
+ panic(fmt.Errorf("unknown resource mode: %s", r.Mode))
+ }
+ if r.Index >= 0 {
+ result += fmt.Sprintf(".%d", r.Index)
+ }
+
+ return result
+}
+
+// parseResourceAddressConfig creates a resource address from a config.Resource
+func parseResourceAddressConfig(r *config.Resource) (*ResourceAddress, error) {
+ return &ResourceAddress{
+ Type: r.Type,
+ Name: r.Name,
+ Index: -1,
+ InstanceType: TypePrimary,
+ Mode: r.Mode,
+ }, nil
+}
+
+// parseResourceAddressInternal parses the somewhat bespoke resource
+// identifier used in states and diffs, such as "instance.name.0".
+func parseResourceAddressInternal(s string) (*ResourceAddress, error) {
+ // Split based on ".". Every resource address should have at least two
+ // elements (type and name).
+ parts := strings.Split(s, ".")
+ if len(parts) < 2 || len(parts) > 4 {
+ return nil, fmt.Errorf("Invalid internal resource address format: %s", s)
+ }
+
+ // Data resource if we have at least 3 parts and the first one is data
+ mode := config.ManagedResourceMode
+ if len(parts) > 2 && parts[0] == "data" {
+ mode = config.DataResourceMode
+ parts = parts[1:]
+ }
+
+ // If we're not a data resource and we have more than three parts, it's an error
+ if len(parts) > 3 && mode != config.DataResourceMode {
+ return nil, fmt.Errorf("Invalid internal resource address format: %s", s)
+ }
+
+ // Build the parts of the resource address that are guaranteed to exist
+ addr := &ResourceAddress{
+ Type: parts[0],
+ Name: parts[1],
+ Index: -1,
+ InstanceType: TypePrimary,
+ Mode: mode,
+ }
+
+ // If we have more parts, then we have an index. Parse that.
+ if len(parts) > 2 {
+ idx, err := strconv.ParseInt(parts[2], 0, 0)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err)
+ }
+
+ addr.Index = int(idx)
+ }
+
+ return addr, nil
+}
+
+// ParseResourceAddress parses a user-facing resource address string, such
+// as "module.foo.aws_instance.web[1]", into a ResourceAddress.
+func ParseResourceAddress(s string) (*ResourceAddress, error) {
+ matches, err := tokenizeResourceAddress(s)
+ if err != nil {
+ return nil, err
+ }
+ mode := config.ManagedResourceMode
+ if matches["data_prefix"] != "" {
+ mode = config.DataResourceMode
+ }
+ resourceIndex, err := ParseResourceIndex(matches["index"])
+ if err != nil {
+ return nil, err
+ }
+ instanceType, err := ParseInstanceType(matches["instance_type"])
+ if err != nil {
+ return nil, err
+ }
+ path := ParseResourcePath(matches["path"])
+
+ // not allowed to say "data." without a type following
+ if mode == config.DataResourceMode && matches["type"] == "" {
+ return nil, fmt.Errorf("must target specific data instance")
+ }
+
+ return &ResourceAddress{
+ Path: path,
+ Index: resourceIndex,
+ InstanceType: instanceType,
+ InstanceTypeSet: matches["instance_type"] != "",
+ Name: matches["name"],
+ Type: matches["type"],
+ Mode: mode,
+ }, nil
+}
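+
+// Example (illustrative sketch, not part of the original source):
+//
+//	addr, err := ParseResourceAddress("module.foo.aws_instance.web[1]")
+//	// addr.Path == []string{"foo"}, addr.Type == "aws_instance",
+//	// addr.Name == "web", addr.Index == 1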
+
+// Equals returns true if the receiver matches the given address. Unset
+// fields (an empty Name or Type, or an Index of -1) act as wildcards and
+// match any value on the other side.
+func (addr *ResourceAddress) Equals(raw interface{}) bool {
+ other, ok := raw.(*ResourceAddress)
+ if !ok {
+ return false
+ }
+
+ pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 ||
+ reflect.DeepEqual(addr.Path, other.Path)
+
+ indexMatch := addr.Index == -1 ||
+ other.Index == -1 ||
+ addr.Index == other.Index
+
+ nameMatch := addr.Name == "" ||
+ other.Name == "" ||
+ addr.Name == other.Name
+
+ typeMatch := addr.Type == "" ||
+ other.Type == "" ||
+ addr.Type == other.Type
+
+ // mode is significant only when type is set
+ modeMatch := addr.Type == "" ||
+ other.Type == "" ||
+ addr.Mode == other.Mode
+
+ return pathMatch &&
+ indexMatch &&
+ addr.InstanceType == other.InstanceType &&
+ nameMatch &&
+ typeMatch &&
+ modeMatch
+}
+
+func ParseResourceIndex(s string) (int, error) {
+ if s == "" {
+ return -1, nil
+ }
+ return strconv.Atoi(s)
+}
+
+func ParseResourcePath(s string) []string {
+ if s == "" {
+ return nil
+ }
+ parts := strings.Split(s, ".")
+ path := make([]string, 0, len(parts))
+ for _, s := range parts {
+ // Due to the limitations of the regexp match below, the path match has
+ // some noise in it that we have to filter out :|
+ if s == "" || s == "module" {
+ continue
+ }
+ path = append(path, s)
+ }
+ return path
+}
+
+func ParseInstanceType(s string) (InstanceType, error) {
+ switch s {
+ case "", "primary":
+ return TypePrimary, nil
+ case "deposed":
+ return TypeDeposed, nil
+ case "tainted":
+ return TypeTainted, nil
+ default:
+ return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s)
+ }
+}
+
+func tokenizeResourceAddress(s string) (map[string]string, error) {
+ // Example of portions of the regexp below using the
+ // string "aws_instance.web.tainted[1]"
+ re := regexp.MustCompile(`\A` +
+ // "module.foo.module.bar" (optional)
+ `(?P<path>(?:module\.[^.]+\.?)*)` +
+ // possibly "data.", if targeting is a data resource
+ `(?P<data_prefix>(?:data\.)?)` +
+ // "aws_instance.web" (optional when module path specified)
+ `(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
+ // "tainted" (optional, omission implies: "primary")
+ `(?:\.(?P<instance_type>\w+))?` +
+ // "1" (optional, omission implies: "0")
+ `(?:\[(?P<index>\d+)\])?` +
+ `\z`)
+
+ groupNames := re.SubexpNames()
+ rawMatches := re.FindAllStringSubmatch(s, -1)
+ if len(rawMatches) != 1 {
+ return nil, fmt.Errorf("Problem parsing address: %q", s)
+ }
+
+ matches := make(map[string]string)
+ for i, m := range rawMatches[0] {
+ matches[groupNames[i]] = m
+ }
+
+ return matches, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
new file mode 100644
index 00000000..1a68c869
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
@@ -0,0 +1,204 @@
+package terraform
+
+// ResourceProvider is an interface that must be implemented by any
+// resource provider: the thing that creates and manages the resources in
+// a Terraform configuration.
+//
+// Important implementation note: All returned pointers, such as
+// *ResourceConfig, *InstanceState, *InstanceDiff, etc., must not point to
+// shared data. Terraform is highly parallel and assumes that this data is
+// safe to read/write in parallel, so these must be unique references. Note
+// that it is safe to return arguments as results, however.
+type ResourceProvider interface {
+ /*********************************************************************
+ * Functions related to the provider
+ *********************************************************************/
+
+ // Input is called to ask the provider to prompt the user for input
+ // needed to complete the configuration, if necessary.
+ //
+ // This may or may not be called, so resource provider writers shouldn't
+ // rely on this being available to set default values for validation
+ // later. An example of a situation where this wouldn't be called is if
+ // the user is not using a TTY.
+ Input(UIInput, *ResourceConfig) (*ResourceConfig, error)
+
+ // Validate is called once at the beginning with the raw configuration
+ // (no interpolation done) and can return a list of warnings and/or
+ // errors.
+ //
+ // This is called once with the provider configuration only. It may not
+ // be called at all if no provider configuration is given.
+ //
+ // This should not assume that any values of the configurations are valid.
+ // The primary use case of this call is to check that required keys are
+ // set.
+ Validate(*ResourceConfig) ([]string, []error)
+
+ // Configure configures the provider itself with the configuration
+ // given. This is useful for setting things like access keys.
+ //
+ // This won't be called at all if no provider configuration is given.
+ //
+ // Configure returns an error if it occurred.
+ Configure(*ResourceConfig) error
+
+ // Resources returns all the available resource types that this provider
+ // knows how to manage.
+ Resources() []ResourceType
+
+ // Stop is called when the provider should halt any in-flight actions.
+ //
+ // This can be used to make a nicer Ctrl-C experience for Terraform.
+ // Even if this isn't implemented to do anything (just returns nil),
+ // Terraform will still cleanly stop after the currently executing
+ // graph node is complete. However, this API can be used to make more
+ // efficient halts.
+ //
+ // Stop doesn't have to and shouldn't block waiting for in-flight actions
+ // to complete. It should take any action it wants and return immediately
+ // acknowledging it has received the stop request. Terraform core will
+ // stop making further API calls to the provider shortly after Stop is
+ // called (technically, once the currently executing graph nodes are
+ // complete).
+ //
+ // The error returned, if non-nil, is assumed to mean that signaling the
+ // stop somehow failed and that the user may have to wait longer.
+ Stop() error
+
+ /*********************************************************************
+ * Functions related to individual resources
+ *********************************************************************/
+
+ // ValidateResource is called once at the beginning with the raw
+ // configuration (no interpolation done) and can return a list of warnings
+ // and/or errors.
+ //
+ // This is called once per resource.
+ //
+ // This should not assume any of the values in the resource configuration
+ // are valid since it is possible they have to be interpolated still.
+ // The primary use case of this call is to check that the required keys
+ // are set and that the general structure is correct.
+ ValidateResource(string, *ResourceConfig) ([]string, []error)
+
+ // Apply applies a diff to a specific resource and returns the new
+ // resource state along with an error.
+ //
+ // If the resource state given has an empty ID, then a new resource
+ // is expected to be created.
+ Apply(
+ *InstanceInfo,
+ *InstanceState,
+ *InstanceDiff) (*InstanceState, error)
+
+ // Diff diffs a resource versus a desired state and returns
+ // a diff.
+ Diff(
+ *InstanceInfo,
+ *InstanceState,
+ *ResourceConfig) (*InstanceDiff, error)
+
+ // Refresh refreshes a resource and updates all of its attributes
+ // with the latest information.
+ Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error)
+
+ /*********************************************************************
+ * Functions related to importing
+ *********************************************************************/
+
+ // ImportState requests that the given resource be imported.
+ //
+ // The returned InstanceState only requires the ID to be set. Importing
+ // will always call Refresh on the state afterwards to complete it.
+ //
+ // IMPORTANT: InstanceState doesn't have the resource type attached
+ // to it. A type must be specified on the state via its Ephemeral
+ // field.
+ //
+ // This function can return multiple states. Normally, an import
+ // will map 1:1 to a physical resource. However, some resources map
+ // to multiple. For example, an AWS security group may contain many rules.
+ // Each rule is represented by a separate resource in Terraform,
+ // therefore multiple states are returned.
+ ImportState(*InstanceInfo, string) ([]*InstanceState, error)
+
+ /*********************************************************************
+ * Functions related to data resources
+ *********************************************************************/
+
+ // ValidateDataSource is called once at the beginning with the raw
+ // configuration (no interpolation done) and can return a list of warnings
+ // and/or errors.
+ //
+ // This is called once per data source instance.
+ //
+ // This should not assume any of the values in the resource configuration
+ // are valid since it is possible they have to be interpolated still.
+ // The primary use case of this call is to check that the required keys
+ // are set and that the general structure is correct.
+ ValidateDataSource(string, *ResourceConfig) ([]string, []error)
+
+ // DataSources returns all of the available data sources that this
+ // provider implements.
+ DataSources() []DataSource
+
+ // ReadDataDiff produces a diff that represents the state that will
+ // be produced when the given data source is read using a later call
+ // to ReadDataApply.
+ ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)
+
+ // ReadDataApply initializes a data instance using the configuration
+ // in a diff produced by ReadDataDiff.
+ ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
+}
+
+// ResourceProviderCloser is an interface that providers that can close
+// connections that aren't needed anymore must implement.
+type ResourceProviderCloser interface {
+ Close() error
+}
+
+// ResourceType is a type of resource that a resource provider can manage.
+type ResourceType struct {
+ Name string // Name of the resource, for example "instance" (no provider prefix)
+ Importable bool // Whether this resource supports importing
+}
+
+// DataSource is a data source that a resource provider implements.
+type DataSource struct {
+ Name string
+}
+
+// ResourceProviderFactory is a function type that creates a new instance
+// of a resource provider.
+type ResourceProviderFactory func() (ResourceProvider, error)
+
+// ResourceProviderFactoryFixed is a helper that creates a
+// ResourceProviderFactory that just returns some fixed provider.
+func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory {
+ return func() (ResourceProvider, error) {
+ return p, nil
+ }
+}
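+
+// Example (illustrative sketch, not part of the original source): a fixed
+// factory is handy in tests where every request should be served by the
+// same provider instance.
+//
+//	p := new(MockResourceProvider)
+//	factory := ResourceProviderFactoryFixed(p)
+//	provider, _ := factory() // always returns p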
+
+func ProviderHasResource(p ResourceProvider, n string) bool {
+ for _, rt := range p.Resources() {
+ if rt.Name == n {
+ return true
+ }
+ }
+
+ return false
+}
+
+func ProviderHasDataSource(p ResourceProvider, n string) bool {
+ for _, rt := range p.DataSources() {
+ if rt.Name == n {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
new file mode 100644
index 00000000..f5315339
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
@@ -0,0 +1,297 @@
+package terraform
+
+import "sync"
+
+// MockResourceProvider implements ResourceProvider but mocks out all the
+// calls for testing purposes.
+type MockResourceProvider struct {
+ sync.Mutex
+
+ // Anything you want, in case you need to store extra data with the mock.
+ Meta interface{}
+
+ CloseCalled bool
+ CloseError error
+ InputCalled bool
+ InputInput UIInput
+ InputConfig *ResourceConfig
+ InputReturnConfig *ResourceConfig
+ InputReturnError error
+ InputFn func(UIInput, *ResourceConfig) (*ResourceConfig, error)
+ ApplyCalled bool
+ ApplyInfo *InstanceInfo
+ ApplyState *InstanceState
+ ApplyDiff *InstanceDiff
+ ApplyFn func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error)
+ ApplyReturn *InstanceState
+ ApplyReturnError error
+ ConfigureCalled bool
+ ConfigureConfig *ResourceConfig
+ ConfigureFn func(*ResourceConfig) error
+ ConfigureReturnError error
+ DiffCalled bool
+ DiffInfo *InstanceInfo
+ DiffState *InstanceState
+ DiffDesired *ResourceConfig
+ DiffFn func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error)
+ DiffReturn *InstanceDiff
+ DiffReturnError error
+ RefreshCalled bool
+ RefreshInfo *InstanceInfo
+ RefreshState *InstanceState
+ RefreshFn func(*InstanceInfo, *InstanceState) (*InstanceState, error)
+ RefreshReturn *InstanceState
+ RefreshReturnError error
+ ResourcesCalled bool
+ ResourcesReturn []ResourceType
+ ReadDataApplyCalled bool
+ ReadDataApplyInfo *InstanceInfo
+ ReadDataApplyDiff *InstanceDiff
+ ReadDataApplyFn func(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
+ ReadDataApplyReturn *InstanceState
+ ReadDataApplyReturnError error
+ ReadDataDiffCalled bool
+ ReadDataDiffInfo *InstanceInfo
+ ReadDataDiffDesired *ResourceConfig
+ ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)
+ ReadDataDiffReturn *InstanceDiff
+ ReadDataDiffReturnError error
+ StopCalled bool
+ StopFn func() error
+ StopReturnError error
+ DataSourcesCalled bool
+ DataSourcesReturn []DataSource
+ ValidateCalled bool
+ ValidateConfig *ResourceConfig
+ ValidateFn func(*ResourceConfig) ([]string, []error)
+ ValidateReturnWarns []string
+ ValidateReturnErrors []error
+ ValidateResourceFn func(string, *ResourceConfig) ([]string, []error)
+ ValidateResourceCalled bool
+ ValidateResourceType string
+ ValidateResourceConfig *ResourceConfig
+ ValidateResourceReturnWarns []string
+ ValidateResourceReturnErrors []error
+ ValidateDataSourceFn func(string, *ResourceConfig) ([]string, []error)
+ ValidateDataSourceCalled bool
+ ValidateDataSourceType string
+ ValidateDataSourceConfig *ResourceConfig
+ ValidateDataSourceReturnWarns []string
+ ValidateDataSourceReturnErrors []error
+
+ ImportStateCalled bool
+ ImportStateInfo *InstanceInfo
+ ImportStateID string
+ ImportStateReturn []*InstanceState
+ ImportStateReturnError error
+ ImportStateFn func(*InstanceInfo, string) ([]*InstanceState, error)
+}
+
+func (p *MockResourceProvider) Close() error {
+ p.CloseCalled = true
+ return p.CloseError
+}
+
+func (p *MockResourceProvider) Input(
+ input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
+ p.InputCalled = true
+ p.InputInput = input
+ p.InputConfig = c
+ if p.InputFn != nil {
+ return p.InputFn(input, c)
+ }
+ return p.InputReturnConfig, p.InputReturnError
+}
+
+func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ValidateCalled = true
+ p.ValidateConfig = c
+ if p.ValidateFn != nil {
+ return p.ValidateFn(c)
+ }
+ return p.ValidateReturnWarns, p.ValidateReturnErrors
+}
+
+func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ValidateResourceCalled = true
+ p.ValidateResourceType = t
+ p.ValidateResourceConfig = c
+
+ if p.ValidateResourceFn != nil {
+ return p.ValidateResourceFn(t, c)
+ }
+
+ return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors
+}
+
+func (p *MockResourceProvider) Configure(c *ResourceConfig) error {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ConfigureCalled = true
+ p.ConfigureConfig = c
+
+ if p.ConfigureFn != nil {
+ return p.ConfigureFn(c)
+ }
+
+ return p.ConfigureReturnError
+}
+
+func (p *MockResourceProvider) Stop() error {
+ p.Lock()
+ defer p.Unlock()
+
+ p.StopCalled = true
+ if p.StopFn != nil {
+ return p.StopFn()
+ }
+
+ return p.StopReturnError
+}
+
+func (p *MockResourceProvider) Apply(
+ info *InstanceInfo,
+ state *InstanceState,
+ diff *InstanceDiff) (*InstanceState, error) {
+ // We only lock while writing data. Reading is fine
+ p.Lock()
+ p.ApplyCalled = true
+ p.ApplyInfo = info
+ p.ApplyState = state
+ p.ApplyDiff = diff
+ p.Unlock()
+
+ if p.ApplyFn != nil {
+ return p.ApplyFn(info, state, diff)
+ }
+
+ return p.ApplyReturn.DeepCopy(), p.ApplyReturnError
+}
+
+func (p *MockResourceProvider) Diff(
+ info *InstanceInfo,
+ state *InstanceState,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.DiffCalled = true
+ p.DiffInfo = info
+ p.DiffState = state
+ p.DiffDesired = desired
+ if p.DiffFn != nil {
+ return p.DiffFn(info, state, desired)
+ }
+
+ return p.DiffReturn.DeepCopy(), p.DiffReturnError
+}
+
+func (p *MockResourceProvider) Refresh(
+ info *InstanceInfo,
+ s *InstanceState) (*InstanceState, error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.RefreshCalled = true
+ p.RefreshInfo = info
+ p.RefreshState = s
+
+ if p.RefreshFn != nil {
+ return p.RefreshFn(info, s)
+ }
+
+ return p.RefreshReturn.DeepCopy(), p.RefreshReturnError
+}
+
+func (p *MockResourceProvider) Resources() []ResourceType {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ResourcesCalled = true
+ return p.ResourcesReturn
+}
+
+func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ImportStateCalled = true
+ p.ImportStateInfo = info
+ p.ImportStateID = id
+ if p.ImportStateFn != nil {
+ return p.ImportStateFn(info, id)
+ }
+
+ var result []*InstanceState
+ if p.ImportStateReturn != nil {
+ result = make([]*InstanceState, len(p.ImportStateReturn))
+ for i, v := range p.ImportStateReturn {
+ result[i] = v.DeepCopy()
+ }
+ }
+
+ return result, p.ImportStateReturnError
+}
+
+func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ValidateDataSourceCalled = true
+ p.ValidateDataSourceType = t
+ p.ValidateDataSourceConfig = c
+
+ if p.ValidateDataSourceFn != nil {
+ return p.ValidateDataSourceFn(t, c)
+ }
+
+ return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors
+}
+
+func (p *MockResourceProvider) ReadDataDiff(
+ info *InstanceInfo,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ReadDataDiffCalled = true
+ p.ReadDataDiffInfo = info
+ p.ReadDataDiffDesired = desired
+ if p.ReadDataDiffFn != nil {
+ return p.ReadDataDiffFn(info, desired)
+ }
+
+ return p.ReadDataDiffReturn.DeepCopy(), p.ReadDataDiffReturnError
+}
+
+func (p *MockResourceProvider) ReadDataApply(
+ info *InstanceInfo,
+ d *InstanceDiff) (*InstanceState, error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ReadDataApplyCalled = true
+ p.ReadDataApplyInfo = info
+ p.ReadDataApplyDiff = d
+
+ if p.ReadDataApplyFn != nil {
+ return p.ReadDataApplyFn(info, d)
+ }
+
+ return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError
+}
+
+func (p *MockResourceProvider) DataSources() []DataSource {
+ p.Lock()
+ defer p.Unlock()
+
+ p.DataSourcesCalled = true
+ return p.DataSourcesReturn
+}
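+
+// Example (illustrative sketch, not part of the original source): tests
+// typically preconfigure return values, exercise the code under test, and
+// then assert on the recorded call state.
+//
+//	p := new(MockResourceProvider)
+//	p.ResourcesReturn = []ResourceType{{Name: "test_instance"}}
+//	_ = ProviderHasResource(p, "test_instance") // true; p.ResourcesCalled is now true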
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
new file mode 100644
index 00000000..361ec1ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
@@ -0,0 +1,54 @@
+package terraform
+
+// ResourceProvisioner is an interface that must be implemented by any
+// resource provisioner: the thing that initializes resources in
+// a Terraform configuration.
+type ResourceProvisioner interface {
+ // Validate is called once at the beginning with the raw
+ // configuration (no interpolation done) and can return a list of warnings
+ // and/or errors.
+ //
+ // This is called once per resource.
+ //
+ // This should not assume any of the values in the resource configuration
+ // are valid since it is possible they have to be interpolated still.
+ // The primary use case of this call is to check that the required keys
+ // are set and that the general structure is correct.
+ Validate(*ResourceConfig) ([]string, []error)
+
+ // Apply runs the provisioner on a specific resource and returns the new
+ // resource state along with an error. Instead of a diff, the ResourceConfig
+ // is provided since provisioners only run after a resource has been
+ // newly created.
+ Apply(UIOutput, *InstanceState, *ResourceConfig) error
+
+ // Stop is called when the provisioner should halt any in-flight actions.
+ //
+ // This can be used to make a nicer Ctrl-C experience for Terraform.
+ // Even if this isn't implemented to do anything (just returns nil),
+ // Terraform will still cleanly stop after the currently executing
+ // graph node is complete. However, this API can be used to make more
+ // efficient halts.
+ //
+ // Stop doesn't have to and shouldn't block waiting for in-flight actions
+ // to complete. It should take any action it wants and return immediately
+ // acknowledging it has received the stop request. Terraform core will
+ // stop making further API calls to the provisioner shortly after Stop
+ // is called (technically, once the currently executing graph nodes are
+ // complete).
+ //
+ // The error returned, if non-nil, is assumed to mean that signaling the
+ // stop somehow failed and that the user may have to wait longer.
+ Stop() error
+}
+
+// ResourceProvisionerCloser is an interface that provisioners that can close
+// connections that aren't needed anymore must implement.
+type ResourceProvisionerCloser interface {
+ Close() error
+}
+
+// ResourceProvisionerFactory is a function type that creates a new instance
+// of a resource provisioner.
+type ResourceProvisionerFactory func() (ResourceProvisioner, error)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
new file mode 100644
index 00000000..f471a518
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
@@ -0,0 +1,72 @@
+package terraform
+
+import "sync"
+
+// MockResourceProvisioner implements ResourceProvisioner but mocks out all the
+// calls for testing purposes.
+type MockResourceProvisioner struct {
+ sync.Mutex
+ // Anything you want, in case you need to store extra data with the mock.
+ Meta interface{}
+
+ ApplyCalled bool
+ ApplyOutput UIOutput
+ ApplyState *InstanceState
+ ApplyConfig *ResourceConfig
+ ApplyFn func(*InstanceState, *ResourceConfig) error
+ ApplyReturnError error
+
+ ValidateCalled bool
+ ValidateConfig *ResourceConfig
+ ValidateFn func(c *ResourceConfig) ([]string, []error)
+ ValidateReturnWarns []string
+ ValidateReturnErrors []error
+
+ StopCalled bool
+ StopFn func() error
+ StopReturnError error
+}
+
+func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ValidateCalled = true
+ p.ValidateConfig = c
+ if p.ValidateFn != nil {
+ return p.ValidateFn(c)
+ }
+ return p.ValidateReturnWarns, p.ValidateReturnErrors
+}
+
+func (p *MockResourceProvisioner) Apply(
+ output UIOutput,
+ state *InstanceState,
+ c *ResourceConfig) error {
+ p.Lock()
+
+ p.ApplyCalled = true
+ p.ApplyOutput = output
+ p.ApplyState = state
+ p.ApplyConfig = c
+ if p.ApplyFn != nil {
+ fn := p.ApplyFn
+ p.Unlock()
+ return fn(state, c)
+ }
+
+ defer p.Unlock()
+ return p.ApplyReturnError
+}
+
+func (p *MockResourceProvisioner) Stop() error {
+ p.Lock()
+ defer p.Unlock()
+
+ p.StopCalled = true
+ if p.StopFn != nil {
+ return p.StopFn()
+ }
+
+ return p.StopReturnError
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/semantics.go b/vendor/github.com/hashicorp/terraform/terraform/semantics.go
new file mode 100644
index 00000000..20f1d8a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/semantics.go
@@ -0,0 +1,132 @@
+package terraform
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphSemanticChecker is the interface that semantic checks across
+// the entire Terraform graph implement.
+//
+// The graph should NOT be modified by the semantic checker.
+type GraphSemanticChecker interface {
+ Check(*dag.Graph) error
+}
+
+// UnorderedSemanticCheckRunner is an implementation of GraphSemanticChecker
+// that runs a list of SemanticCheckers against the vertices of the graph
+// in no specified order.
+type UnorderedSemanticCheckRunner struct {
+ Checks []SemanticChecker
+}
+
+func (sc *UnorderedSemanticCheckRunner) Check(g *dag.Graph) error {
+ var err error
+ for _, v := range g.Vertices() {
+ for _, check := range sc.Checks {
+ if e := check.Check(g, v); e != nil {
+ err = multierror.Append(err, e)
+ }
+ }
+ }
+
+ return err
+}
+
+// SemanticChecker is the interface that semantic checks across the
+// Terraform graph implement. Errors are accumulated. Even after an error
+// is returned, child vertices in the graph will still be visited.
+//
+// The graph should NOT be modified by the semantic checker.
+//
+// The order in which vertices are visited is left unspecified, so the
+// semantic checks should not rely on that.
+type SemanticChecker interface {
+ Check(*dag.Graph, dag.Vertex) error
+}
+
+// smcUserVariables does all the semantic checks to verify that the
+// variables given satisfy the configuration itself.
+func smcUserVariables(c *config.Config, vs map[string]interface{}) []error {
+ var errs []error
+
+ cvs := make(map[string]*config.Variable)
+ for _, v := range c.Variables {
+ cvs[v.Name] = v
+ }
+
+ // Check that all required variables are present
+ required := make(map[string]struct{})
+ for _, v := range c.Variables {
+ if v.Required() {
+ required[v.Name] = struct{}{}
+ }
+ }
+ for k := range vs {
+ delete(required, k)
+ }
+ if len(required) > 0 {
+ for k := range required {
+ errs = append(errs, fmt.Errorf(
+ "Required variable not set: %s", k))
+ }
+ }
+
+ // Check that types match up
+ for name, proposedValue := range vs {
+ // Check for "map.key" fields. These stopped working with Terraform
+ // 0.7 but we do this to surface a better error message informing
+ // the user what happened.
+ if idx := strings.Index(name, "."); idx > 0 {
+ key := name[:idx]
+ if _, ok := cvs[key]; ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: Overriding map keys with the format `name.key` is no "+
+ "longer allowed. You may still override keys by setting "+
+ "`name = { key = value }`. The maps will be merged. This "+
+ "behavior appeared in 0.7.0.", name))
+ continue
+ }
+ }
+
+ schema, ok := cvs[name]
+ if !ok {
+ continue
+ }
+
+ declaredType := schema.Type()
+
+ switch declaredType {
+ case config.VariableTypeString:
+ switch proposedValue.(type) {
+ case string:
+ continue
+ }
+ case config.VariableTypeMap:
+ switch v := proposedValue.(type) {
+ case map[string]interface{}:
+ continue
+ case []map[string]interface{}:
+ // if we have a list of 1 map, it will get coerced later as needed
+ if len(v) == 1 {
+ continue
+ }
+ }
+ case config.VariableTypeList:
+ switch proposedValue.(type) {
+ case []interface{}:
+ continue
+ }
+ }
+ errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s",
+ name, declaredType.Printable(), hclTypeName(proposedValue)))
+ }
+
+ // TODO(mitchellh): variables that are unknown
+
+ return errs
+}
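+
+// Example (illustrative sketch, not part of the original source): passing a
+// string for a variable declared as a list yields an error built from the
+// fmt.Errorf call above, along the lines of:
+//
+//	variable ports should be type list, got string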
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow.go b/vendor/github.com/hashicorp/terraform/terraform/shadow.go
new file mode 100644
index 00000000..46325595
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow.go
@@ -0,0 +1,28 @@
+package terraform
+
+// Shadow is the interface that any "shadow" structures must implement.
+//
+// A shadow structure is an interface implementation (typically) that
+// shadows a real implementation and verifies that the same behavior occurs
+// on both. The semantics of this behavior are up to the interface itself.
+//
+// A shadow NEVER modifies real values or state. It must always be safe to use.
+//
+// For example, a ResourceProvider shadow ensures that the same operations
+// are done on the same resources with the same configurations.
+//
+// The typical usage of a shadow following this interface is to complete
+// the real operations, then call CloseShadow which tells the shadow that
+// the real side is done. Then, once the shadow is also complete, call
+// ShadowError to find any errors that may have been caught.
+type Shadow interface {
+ // CloseShadow tells the shadow that the REAL implementation is
+ // complete. Therefore, any calls that would block should now return
+ // immediately since no more changes will happen to the real side.
+ CloseShadow() error
+
+ // ShadowError returns the errors that the shadow has found.
+ // This should be called AFTER CloseShadow and AFTER the shadow is
+ // known to be complete (no more calls to it).
+ ShadowError() error
+}
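+
+// Example (illustrative sketch, not part of the original source): the
+// intended call order from the caller's point of view.
+//
+//	// ... run the real operations to completion ...
+//	if err := s.CloseShadow(); err != nil {
+//		// handle err
+//	}
+//	// ... once the shadow side is also known to be complete ...
+//	if err := s.ShadowError(); err != nil {
+//		// the shadow observed divergent behavior
+//	}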
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go
new file mode 100644
index 00000000..116cf84f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go
@@ -0,0 +1,273 @@
+package terraform
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/helper/shadow"
+)
+
+// newShadowComponentFactory creates a shadowed contextComponentFactory
+// so that requests to create new components result in both a real and
+// shadow side.
+func newShadowComponentFactory(
+ f contextComponentFactory) (contextComponentFactory, *shadowComponentFactory) {
+ // Create the shared data
+ shared := &shadowComponentFactoryShared{contextComponentFactory: f}
+
+ // Create the real side
+ real := &shadowComponentFactory{
+ shadowComponentFactoryShared: shared,
+ }
+
+ // Create the shadow
+ shadow := &shadowComponentFactory{
+ shadowComponentFactoryShared: shared,
+ Shadow: true,
+ }
+
+ return real, shadow
+}
+
+// shadowComponentFactory is the shadow side. Any components created
+// with this factory are fake and will not cause real work to happen.
+//
+// Unlike other shadowers, the shadow component factory will allow the
+// shadow to create _any_ component even if it is never requested on the
+// real side. This is because errors will happen later downstream as function
+// calls are made to the shadows that are never matched on the real side.
+type shadowComponentFactory struct {
+ *shadowComponentFactoryShared
+
+ Shadow bool // True if this should return the shadow
+ lock sync.Mutex
+}
+
+func (f *shadowComponentFactory) ResourceProvider(
+ n, uid string) (ResourceProvider, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ real, shadow, err := f.shadowComponentFactoryShared.ResourceProvider(n, uid)
+ var result ResourceProvider = real
+ if f.Shadow {
+ result = shadow
+ }
+
+ return result, err
+}
+
+func (f *shadowComponentFactory) ResourceProvisioner(
+ n, uid string) (ResourceProvisioner, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ real, shadow, err := f.shadowComponentFactoryShared.ResourceProvisioner(n, uid)
+ var result ResourceProvisioner = real
+ if f.Shadow {
+ result = shadow
+ }
+
+ return result, err
+}
+
+// CloseShadow is called when the _real_ side is complete. This will cause
+// all future blocking operations to return immediately on the shadow to
+// ensure the shadow also completes.
+func (f *shadowComponentFactory) CloseShadow() error {
+ // If we aren't the shadow, just return
+ if !f.Shadow {
+ return nil
+ }
+
+ // Lock ourselves so we don't modify state
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ // Grab our shared state
+ shared := f.shadowComponentFactoryShared
+
+ // If we're already closed, it's an error
+ if shared.closed {
+ return fmt.Errorf("component factory shadow already closed")
+ }
+
+ // Close all the providers and provisioners and return the error
+ var result error
+ for _, n := range shared.providerKeys {
+ _, shadow, err := shared.ResourceProvider(n, n)
+ if err == nil && shadow != nil {
+ if err := shadow.CloseShadow(); err != nil {
+ result = multierror.Append(result, err)
+ }
+ }
+ }
+
+ for _, n := range shared.provisionerKeys {
+ _, shadow, err := shared.ResourceProvisioner(n, n)
+ if err == nil && shadow != nil {
+ if err := shadow.CloseShadow(); err != nil {
+ result = multierror.Append(result, err)
+ }
+ }
+ }
+
+ // Mark ourselves as closed
+ shared.closed = true
+
+ return result
+}
+
+func (f *shadowComponentFactory) ShadowError() error {
+ // If we aren't the shadow, just return
+ if !f.Shadow {
+ return nil
+ }
+
+ // Lock ourselves so we don't modify state
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ // Grab our shared state
+ shared := f.shadowComponentFactoryShared
+
+ // If we're not closed, it's an error
+ if !shared.closed {
+ return fmt.Errorf("component factory must be closed to retrieve errors")
+ }
+
+ // Close all the providers and provisioners and return the error
+ var result error
+ for _, n := range shared.providerKeys {
+ _, shadow, err := shared.ResourceProvider(n, n)
+ if err == nil && shadow != nil {
+ if err := shadow.ShadowError(); err != nil {
+ result = multierror.Append(result, err)
+ }
+ }
+ }
+
+ for _, n := range shared.provisionerKeys {
+ _, shadow, err := shared.ResourceProvisioner(n, n)
+ if err == nil && shadow != nil {
+ if err := shadow.ShadowError(); err != nil {
+ result = multierror.Append(result, err)
+ }
+ }
+ }
+
+ return result
+}
+
+// shadowComponentFactoryShared is shared data between the two factories.
+//
+// It is NOT SAFE to run any function on this struct in parallel. Lock
+// access to this struct.
+type shadowComponentFactoryShared struct {
+ contextComponentFactory
+
+ closed bool
+ providers shadow.KeyedValue
+ providerKeys []string
+ provisioners shadow.KeyedValue
+ provisionerKeys []string
+}
+
+// shadowComponentFactoryProviderEntry is the entry that is stored in
+// the shadow.KeyedValue store for a provider.
+type shadowComponentFactoryProviderEntry struct {
+ Real ResourceProvider
+ Shadow shadowResourceProvider
+ Err error
+}
+
+type shadowComponentFactoryProvisionerEntry struct {
+ Real ResourceProvisioner
+ Shadow shadowResourceProvisioner
+ Err error
+}
+
+func (f *shadowComponentFactoryShared) ResourceProvider(
+ n, uid string) (ResourceProvider, shadowResourceProvider, error) {
+ // Determine if we already have a value
+ raw, ok := f.providers.ValueOk(uid)
+ if !ok {
+ // Build the entry
+ var entry shadowComponentFactoryProviderEntry
+
+ // No value, initialize. Create the original
+ p, err := f.contextComponentFactory.ResourceProvider(n, uid)
+ if err != nil {
+ entry.Err = err
+ p = nil // Just to be sure
+ }
+
+ if p != nil {
+ // Create the shadow
+ real, shadow := newShadowResourceProvider(p)
+ entry.Real = real
+ entry.Shadow = shadow
+
+ if f.closed {
+ shadow.CloseShadow()
+ }
+ }
+
+ // Store the value
+ f.providers.SetValue(uid, &entry)
+ f.providerKeys = append(f.providerKeys, uid)
+ raw = &entry
+ }
+
+ // Read the entry
+ entry, ok := raw.(*shadowComponentFactoryProviderEntry)
+ if !ok {
+ return nil, nil, fmt.Errorf("Unknown value for shadow provider: %#v", raw)
+ }
+
+ // Return
+ return entry.Real, entry.Shadow, entry.Err
+}
+
+func (f *shadowComponentFactoryShared) ResourceProvisioner(
+ n, uid string) (ResourceProvisioner, shadowResourceProvisioner, error) {
+ // Determine if we already have a value
+ raw, ok := f.provisioners.ValueOk(uid)
+ if !ok {
+ // Build the entry
+ var entry shadowComponentFactoryProvisionerEntry
+
+ // No value, initialize. Create the original
+ p, err := f.contextComponentFactory.ResourceProvisioner(n, uid)
+ if err != nil {
+ entry.Err = err
+ p = nil // Just to be sure
+ }
+
+ if p != nil {
+ // Create the shadow
+ real, shadow := newShadowResourceProvisioner(p)
+ entry.Real = real
+ entry.Shadow = shadow
+
+ if f.closed {
+ shadow.CloseShadow()
+ }
+ }
+
+ // Store the value
+ f.provisioners.SetValue(uid, &entry)
+ f.provisionerKeys = append(f.provisionerKeys, uid)
+ raw = &entry
+ }
+
+ // Read the entry
+ entry, ok := raw.(*shadowComponentFactoryProvisionerEntry)
+ if !ok {
+ return nil, nil, fmt.Errorf("Unknown value for shadow provisioner: %#v", raw)
+ }
+
+ // Return
+ return entry.Real, entry.Shadow, entry.Err
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go
new file mode 100644
index 00000000..5588af25
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go
@@ -0,0 +1,158 @@
+package terraform
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/mitchellh/copystructure"
+)
+
+// newShadowContext creates a new context that will shadow the given context
+// when walking the graph. The resulting context should be used _only once_
+// for a graph walk.
+//
+// The returned Shadow should be closed after the graph walk with the
+// real context is complete. Errors from the shadow can be retrieved there.
+//
+// Most importantly, any operations done on the shadow context (the returned
+// context) will NEVER affect the real context. All structures are deep
+// copied, no real providers or resources are used, etc.
+func newShadowContext(c *Context) (*Context, *Context, Shadow) {
+ // Copy the targets
+ targetRaw, err := copystructure.Copy(c.targets)
+ if err != nil {
+ panic(err)
+ }
+
+ // Copy the variables
+ varRaw, err := copystructure.Copy(c.variables)
+ if err != nil {
+ panic(err)
+ }
+
+ // Copy the provider inputs
+ providerInputRaw, err := copystructure.Copy(c.providerInputConfig)
+ if err != nil {
+ panic(err)
+ }
+
+ // The factories
+ componentsReal, componentsShadow := newShadowComponentFactory(c.components)
+
+ // Create the shadow
+ shadow := &Context{
+ components: componentsShadow,
+ destroy: c.destroy,
+ diff: c.diff.DeepCopy(),
+ hooks: nil,
+ meta: c.meta,
+ module: c.module,
+ state: c.state.DeepCopy(),
+ targets: targetRaw.([]string),
+ variables: varRaw.(map[string]interface{}),
+
+ // NOTE(mitchellh): This is not going to work for shadows that are
+ // testing that input results in the proper end state. At the time
+ // of writing, input is not used in any state-changing graph
+ // walks anyway, so this checks nothing. We set it to this to avoid
+ // any panics, though even a nil value would work here.
+ uiInput: new(MockUIInput),
+
+ // Hardcoded to 4 since parallelism in the shadow doesn't matter
+ // a ton since we're doing far less compared to the real side
+ // and our operations are MUCH faster.
+ parallelSem: NewSemaphore(4),
+ providerInputConfig: providerInputRaw.(map[string]map[string]interface{}),
+ }
+
+ // Create the real context. This is effectively just a copy of
+ // the context given except we need to modify some of the values
+ // to point to the real side of a shadow so the shadow can compare values.
+ real := &Context{
+ // The fields below are changed.
+ components: componentsReal,
+
+ // The fields below are direct copies
+ destroy: c.destroy,
+ diff: c.diff,
+ // diffLock - no copy
+ hooks: c.hooks,
+ meta: c.meta,
+ module: c.module,
+ sh: c.sh,
+ state: c.state,
+ // stateLock - no copy
+ targets: c.targets,
+ uiInput: c.uiInput,
+ variables: c.variables,
+
+ // l - no copy
+ parallelSem: c.parallelSem,
+ providerInputConfig: c.providerInputConfig,
+ runContext: c.runContext,
+ runContextCancel: c.runContextCancel,
+ shadowErr: c.shadowErr,
+ }
+
+ return real, shadow, &shadowContextCloser{
+ Components: componentsShadow,
+ }
+}
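+
+// A minimal lifecycle sketch for the values returned above (hedged;
+// the concrete graph-walk calls depend on the caller):
+//
+//     real, shadowCtx, closer := newShadowContext(ctx)
+//     // ... perform the walk with `real`, replay it with `shadowCtx` ...
+//     closeErr := closer.CloseShadow() // close once the real walk is done
+//     diffErr := closer.ShadowError()  // behavior mismatches, if any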
+
+// shadowContextVerify takes the real and shadow context and verifies they
+// have equal diffs and states.
+func shadowContextVerify(real, shadow *Context) error {
+ var result error
+
+ // The states compared must be pruned so they're minimal/clean
+ real.state.prune()
+ shadow.state.prune()
+
+ // Compare the states
+ if !real.state.Equal(shadow.state) {
+ result = multierror.Append(result, fmt.Errorf(
+ "Real and shadow states do not match! "+
+ "Real state:\n\n%s\n\n"+
+ "Shadow state:\n\n%s\n\n",
+ real.state, shadow.state))
+ }
+
+ // Compare the diffs
+ if !real.diff.Equal(shadow.diff) {
+ result = multierror.Append(result, fmt.Errorf(
+ "Real and shadow diffs do not match! "+
+ "Real diff:\n\n%s\n\n"+
+ "Shadow diff:\n\n%s\n\n",
+ real.diff, shadow.diff))
+ }
+
+ return result
+}
+
+// shadowContextCloser is the Shadow returned by newShadowContext that
+// closes all the shadows and returns the results.
+type shadowContextCloser struct {
+ Components *shadowComponentFactory
+}
+
+// CloseShadow closes the shadow context.
+func (c *shadowContextCloser) CloseShadow() error {
+ return c.Components.CloseShadow()
+}
+
+func (c *shadowContextCloser) ShadowError() error {
+ err := c.Components.ShadowError()
+ if err == nil {
+ return nil
+ }
+
+ // This is a sad edge case: if the configuration contains uuid() at
+ // any point, we cannot reason about the shadow execution. Tested
+ // with Context2Plan_shadowUuid.
+ if strings.Contains(err.Error(), "uuid()") {
+ err = nil
+ }
+
+ return err
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
new file mode 100644
index 00000000..9741d7e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
@@ -0,0 +1,815 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/helper/shadow"
+)
+
+// shadowResourceProvider implements ResourceProvider for the shadow
+// eval context defined in eval_context_shadow.go.
+//
+// This is used to verify behavior with a real provider. This shouldn't
+// be used directly.
+type shadowResourceProvider interface {
+ ResourceProvider
+ Shadow
+}
+
+// newShadowResourceProvider creates a new shadowed ResourceProvider.
+//
+// This will assume a well-behaved real ResourceProvider. For example,
+// it assumes that the `Resources` call underneath doesn't change values:
+// once it is called on the real provider, the result is cached and
+// returned in the shadow, since the number of calls shouldn't affect
+// actual behavior.
+//
+// However, with calls like Apply, call order is taken into account,
+// parameters are checked for equality, etc.
+func newShadowResourceProvider(p ResourceProvider) (ResourceProvider, shadowResourceProvider) {
+ // Create the shared data
+ shared := shadowResourceProviderShared{}
+
+ // Create the real provider that does actual work
+ real := &shadowResourceProviderReal{
+ ResourceProvider: p,
+ Shared: &shared,
+ }
+
+ // Create the shadow that watches the real value
+ shadow := &shadowResourceProviderShadow{
+ Shared: &shared,
+
+ resources: p.Resources(),
+ dataSources: p.DataSources(),
+ }
+
+ return real, shadow
+}
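+
+// Pairing contract sketch (assumed from the shared-value design above):
+// the real half records calls, the shadow half replays them, blocking
+// until the corresponding real call has published its data:
+//
+//     real, shadow := newShadowResourceProvider(underlying)
+//     go func() { _ = shadow.Configure(cfg) }() // blocks on shared value
+//     _ = real.Configure(cfg)                   // records, unblocking it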
+
+// shadowResourceProviderReal is the real resource provider. Function calls
+// to this will perform real work. This records the parameters and return
+// values and call order for the shadow to reproduce.
+type shadowResourceProviderReal struct {
+ ResourceProvider
+
+ Shared *shadowResourceProviderShared
+}
+
+func (p *shadowResourceProviderReal) Close() error {
+ var result error
+ if c, ok := p.ResourceProvider.(ResourceProviderCloser); ok {
+ result = c.Close()
+ }
+
+ p.Shared.CloseErr.SetValue(result)
+ return result
+}
+
+func (p *shadowResourceProviderReal) Input(
+ input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
+ cCopy := c.DeepCopy()
+
+ result, err := p.ResourceProvider.Input(input, c)
+ p.Shared.Input.SetValue(&shadowResourceProviderInput{
+ Config: cCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+func (p *shadowResourceProviderReal) Validate(c *ResourceConfig) ([]string, []error) {
+ warns, errs := p.ResourceProvider.Validate(c)
+ p.Shared.Validate.SetValue(&shadowResourceProviderValidate{
+ Config: c.DeepCopy(),
+ ResultWarn: warns,
+ ResultErr: errs,
+ })
+
+ return warns, errs
+}
+
+func (p *shadowResourceProviderReal) Configure(c *ResourceConfig) error {
+ cCopy := c.DeepCopy()
+
+ err := p.ResourceProvider.Configure(c)
+ p.Shared.Configure.SetValue(&shadowResourceProviderConfigure{
+ Config: cCopy,
+ Result: err,
+ })
+
+ return err
+}
+
+func (p *shadowResourceProviderReal) Stop() error {
+ return p.ResourceProvider.Stop()
+}
+
+func (p *shadowResourceProviderReal) ValidateResource(
+ t string, c *ResourceConfig) ([]string, []error) {
+ key := t
+ configCopy := c.DeepCopy()
+
+ // Real operation
+ warns, errs := p.ResourceProvider.ValidateResource(t, c)
+
+ // Initialize to ensure we always have a wrapper with a lock
+ p.Shared.ValidateResource.Init(
+ key, &shadowResourceProviderValidateResourceWrapper{})
+
+ // Get the result
+ raw := p.Shared.ValidateResource.Value(key)
+ wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
+ if !ok {
+ // If this fails then we just continue with our day... the shadow
+ // will fail too, but there isn't much we can do.
+ log.Printf(
+ "[ERROR] unknown value in ValidateResource shadow value: %#v", raw)
+ return warns, errs
+ }
+
+ // Lock the wrapper for writing and record our call
+ wrapper.Lock()
+ defer wrapper.Unlock()
+
+ wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateResource{
+ Config: configCopy,
+ Warns: warns,
+ Errors: errs,
+ })
+
+ // With it locked, call SetValue again so that it triggers WaitForChange
+ p.Shared.ValidateResource.SetValue(key, wrapper)
+
+ // Return the result
+ return warns, errs
+}
+
+func (p *shadowResourceProviderReal) Apply(
+ info *InstanceInfo,
+ state *InstanceState,
+ diff *InstanceDiff) (*InstanceState, error) {
+ // These have to be copied before the call since the call can modify them
+ stateCopy := state.DeepCopy()
+ diffCopy := diff.DeepCopy()
+
+ result, err := p.ResourceProvider.Apply(info, state, diff)
+ p.Shared.Apply.SetValue(info.uniqueId(), &shadowResourceProviderApply{
+ State: stateCopy,
+ Diff: diffCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+func (p *shadowResourceProviderReal) Diff(
+ info *InstanceInfo,
+ state *InstanceState,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ // These have to be copied before the call since the call can modify them
+ stateCopy := state.DeepCopy()
+ desiredCopy := desired.DeepCopy()
+
+ result, err := p.ResourceProvider.Diff(info, state, desired)
+ p.Shared.Diff.SetValue(info.uniqueId(), &shadowResourceProviderDiff{
+ State: stateCopy,
+ Desired: desiredCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+func (p *shadowResourceProviderReal) Refresh(
+ info *InstanceInfo,
+ state *InstanceState) (*InstanceState, error) {
+ // This has to be copied before the call since the call can modify it
+ stateCopy := state.DeepCopy()
+
+ result, err := p.ResourceProvider.Refresh(info, state)
+ p.Shared.Refresh.SetValue(info.uniqueId(), &shadowResourceProviderRefresh{
+ State: stateCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+func (p *shadowResourceProviderReal) ValidateDataSource(
+ t string, c *ResourceConfig) ([]string, []error) {
+ key := t
+ configCopy := c.DeepCopy()
+
+ // Real operation
+ warns, errs := p.ResourceProvider.ValidateDataSource(t, c)
+
+ // Initialize
+ p.Shared.ValidateDataSource.Init(
+ key, &shadowResourceProviderValidateDataSourceWrapper{})
+
+ // Get the result
+ raw := p.Shared.ValidateDataSource.Value(key)
+ wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
+ if !ok {
+ // If this fails then we just continue with our day... the shadow
+ // will fail too, but there isn't much we can do.
+ log.Printf(
+ "[ERROR] unknown value in ValidateDataSource shadow value: %#v", raw)
+ return warns, errs
+ }
+
+ // Lock the wrapper for writing and record our call
+ wrapper.Lock()
+ defer wrapper.Unlock()
+
+ wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateDataSource{
+ Config: configCopy,
+ Warns: warns,
+ Errors: errs,
+ })
+
+ // Set it
+ p.Shared.ValidateDataSource.SetValue(key, wrapper)
+
+ // Return the result
+ return warns, errs
+}
+
+func (p *shadowResourceProviderReal) ReadDataDiff(
+ info *InstanceInfo,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ // This has to be copied before the call since the call can modify it
+ desiredCopy := desired.DeepCopy()
+
+ result, err := p.ResourceProvider.ReadDataDiff(info, desired)
+ p.Shared.ReadDataDiff.SetValue(info.uniqueId(), &shadowResourceProviderReadDataDiff{
+ Desired: desiredCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+func (p *shadowResourceProviderReal) ReadDataApply(
+ info *InstanceInfo,
+ diff *InstanceDiff) (*InstanceState, error) {
+ // This has to be copied before the call since the call can modify it
+ diffCopy := diff.DeepCopy()
+
+ result, err := p.ResourceProvider.ReadDataApply(info, diff)
+ p.Shared.ReadDataApply.SetValue(info.uniqueId(), &shadowResourceProviderReadDataApply{
+ Diff: diffCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+// shadowResourceProviderShadow is the shadow resource provider. Function
+// calls never affect real resources. This is paired with the "real" side
+// which must be called properly to enable recording.
+type shadowResourceProviderShadow struct {
+ Shared *shadowResourceProviderShared
+
+ // Cached values that are expected to not change
+ resources []ResourceType
+ dataSources []DataSource
+
+ Error error // Error is the list of errors from the shadow
+ ErrorLock sync.Mutex
+}
+
+type shadowResourceProviderShared struct {
+ // NOTE: Anytime a value is added here, be sure to add it to
+ // the Close() method so that it is closed.
+
+ CloseErr shadow.Value
+ Input shadow.Value
+ Validate shadow.Value
+ Configure shadow.Value
+ ValidateResource shadow.KeyedValue
+ Apply shadow.KeyedValue
+ Diff shadow.KeyedValue
+ Refresh shadow.KeyedValue
+ ValidateDataSource shadow.KeyedValue
+ ReadDataDiff shadow.KeyedValue
+ ReadDataApply shadow.KeyedValue
+}
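+
+// Note (assumed semantics, per helper/shadow): shadow.Value blocks
+// readers until a value is set once, and shadow.KeyedValue is the
+// per-key variant with Init/WaitForChange. Closing the shared struct
+// unblocks any shadow calls still waiting on the real side.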
+
+func (p *shadowResourceProviderShared) Close() error {
+ return shadow.Close(p)
+}
+
+func (p *shadowResourceProviderShadow) CloseShadow() error {
+ err := p.Shared.Close()
+ if err != nil {
+ err = fmt.Errorf("close error: %s", err)
+ }
+
+ return err
+}
+
+func (p *shadowResourceProviderShadow) ShadowError() error {
+ return p.Error
+}
+
+func (p *shadowResourceProviderShadow) Resources() []ResourceType {
+ return p.resources
+}
+
+func (p *shadowResourceProviderShadow) DataSources() []DataSource {
+ return p.dataSources
+}
+
+func (p *shadowResourceProviderShadow) Close() error {
+ v := p.Shared.CloseErr.Value()
+ if v == nil {
+ return nil
+ }
+
+ return v.(error)
+}
+
+func (p *shadowResourceProviderShadow) Input(
+ input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
+ // Get the result of the input call
+ raw := p.Shared.Input.Value()
+ if raw == nil {
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderInput)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'input' shadow value: %#v", raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !c.Equal(result.Config) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Input had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
+ result.Config, c))
+ p.ErrorLock.Unlock()
+ }
+
+ // Return the results
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) Validate(c *ResourceConfig) ([]string, []error) {
+ // Get the result of the validate call
+ raw := p.Shared.Validate.Value()
+ if raw == nil {
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderValidate)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'validate' shadow value: %#v", raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !c.Equal(result.Config) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Validate had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
+ result.Config, c))
+ p.ErrorLock.Unlock()
+ }
+
+ // Return the results
+ return result.ResultWarn, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) Configure(c *ResourceConfig) error {
+ // Get the result of the call
+ raw := p.Shared.Configure.Value()
+ if raw == nil {
+ return nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderConfigure)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'configure' shadow value: %#v", raw))
+ return nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !c.Equal(result.Config) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Configure had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
+ result.Config, c))
+ p.ErrorLock.Unlock()
+ }
+
+ // Return the results
+ return result.Result
+}
+
+// Stop returns immediately.
+func (p *shadowResourceProviderShadow) Stop() error {
+ return nil
+}
+
+func (p *shadowResourceProviderShadow) ValidateResource(t string, c *ResourceConfig) ([]string, []error) {
+ // Unique key
+ key := t
+
+ // Get the initial value
+ raw := p.Shared.ValidateResource.Value(key)
+
+ // Find a validation with our configuration
+ var result *shadowResourceProviderValidateResource
+ for {
+ // Get the value
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ValidateResource' call for %q:\n\n%#v",
+ key, c))
+ return nil, nil
+ }
+
+ wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ValidateResource' shadow value for %q: %#v", key, raw))
+ return nil, nil
+ }
+
+ // Look for the matching call with our configuration
+ wrapper.RLock()
+ for _, call := range wrapper.Calls {
+ if call.Config.Equal(c) {
+ result = call
+ break
+ }
+ }
+ wrapper.RUnlock()
+
+ // If we found a result, exit
+ if result != nil {
+ break
+ }
+
+ // Wait for a change so we can get the wrapper again
+ raw = p.Shared.ValidateResource.WaitForChange(key)
+ }
+
+ return result.Warns, result.Errors
+}
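+
+// Note (assumed): WaitForChange blocks until the real side appends
+// another call under this key, so the loop above terminates only once
+// a real ValidateResource with an equal config has been recorded, or
+// the shared values are closed.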
+
+func (p *shadowResourceProviderShadow) Apply(
+ info *InstanceInfo,
+ state *InstanceState,
+ diff *InstanceDiff) (*InstanceState, error) {
+ // Unique key
+ key := info.uniqueId()
+ raw := p.Shared.Apply.Value(key)
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'apply' call for %q:\n\n%#v\n\n%#v",
+ key, state, diff))
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderApply)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'apply' shadow value for %q: %#v", key, raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !state.Equal(result.State) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Apply %q: state had unequal states (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.State, state))
+ p.ErrorLock.Unlock()
+ }
+
+ if !diff.Equal(result.Diff) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Apply %q: unequal diffs (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.Diff, diff))
+ p.ErrorLock.Unlock()
+ }
+
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) Diff(
+ info *InstanceInfo,
+ state *InstanceState,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ // Unique key
+ key := info.uniqueId()
+ raw := p.Shared.Diff.Value(key)
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'diff' call for %q:\n\n%#v\n\n%#v",
+ key, state, desired))
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderDiff)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'diff' shadow value for %q: %#v", key, raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !state.Equal(result.State) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.State, state))
+ p.ErrorLock.Unlock()
+ }
+ if !desired.Equal(result.Desired) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.Desired, desired))
+ p.ErrorLock.Unlock()
+ }
+
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) Refresh(
+ info *InstanceInfo,
+ state *InstanceState) (*InstanceState, error) {
+ // Unique key
+ key := info.uniqueId()
+ raw := p.Shared.Refresh.Value(key)
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'refresh' call for %q:\n\n%#v",
+ key, state))
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderRefresh)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'refresh' shadow value: %#v", raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !state.Equal(result.State) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Refresh %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.State, state))
+ p.ErrorLock.Unlock()
+ }
+
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) ValidateDataSource(
+ t string, c *ResourceConfig) ([]string, []error) {
+ // Unique key
+ key := t
+
+ // Get the initial value
+ raw := p.Shared.ValidateDataSource.Value(key)
+
+ // Find a validation with our configuration
+ var result *shadowResourceProviderValidateDataSource
+ for {
+ // Get the value
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ValidateDataSource' call for %q:\n\n%#v",
+ key, c))
+ return nil, nil
+ }
+
+ wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ValidateDataSource' shadow value: %#v", raw))
+ return nil, nil
+ }
+
+ // Look for the matching call with our configuration
+ wrapper.RLock()
+ for _, call := range wrapper.Calls {
+ if call.Config.Equal(c) {
+ result = call
+ break
+ }
+ }
+ wrapper.RUnlock()
+
+ // If we found a result, exit
+ if result != nil {
+ break
+ }
+
+ // Wait for a change so we can get the wrapper again
+ raw = p.Shared.ValidateDataSource.WaitForChange(key)
+ }
+
+ return result.Warns, result.Errors
+}
+
+func (p *shadowResourceProviderShadow) ReadDataDiff(
+ info *InstanceInfo,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ // Unique key
+ key := info.uniqueId()
+ raw := p.Shared.ReadDataDiff.Value(key)
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ReadDataDiff' call for %q:\n\n%#v",
+ key, desired))
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderReadDataDiff)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ReadDataDiff' shadow value for %q: %#v", key, raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !desired.Equal(result.Desired) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "ReadDataDiff %q had unequal configs (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.Desired, desired))
+ p.ErrorLock.Unlock()
+ }
+
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) ReadDataApply(
+ info *InstanceInfo,
+ d *InstanceDiff) (*InstanceState, error) {
+ // Unique key
+ key := info.uniqueId()
+ raw := p.Shared.ReadDataApply.Value(key)
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ReadDataApply' call for %q:\n\n%#v",
+ key, d))
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderReadDataApply)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ReadDataApply' shadow value for %q: %#v", key, raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !d.Equal(result.Diff) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "ReadDataApply: unequal diffs (real, then shadow):\n\n%#v\n\n%#v",
+ result.Diff, d))
+ p.ErrorLock.Unlock()
+ }
+
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) {
+ panic("import not supported by shadow graph")
+}
+
+// The structs for the various function calls are put below. These structs
+// are used to carry call information across the real/shadow boundaries.
+
+type shadowResourceProviderInput struct {
+ Config *ResourceConfig
+ Result *ResourceConfig
+ ResultErr error
+}
+
+type shadowResourceProviderValidate struct {
+ Config *ResourceConfig
+ ResultWarn []string
+ ResultErr []error
+}
+
+type shadowResourceProviderConfigure struct {
+ Config *ResourceConfig
+ Result error
+}
+
+type shadowResourceProviderValidateResourceWrapper struct {
+ sync.RWMutex
+
+ Calls []*shadowResourceProviderValidateResource
+}
+
+type shadowResourceProviderValidateResource struct {
+ Config *ResourceConfig
+ Warns []string
+ Errors []error
+}
+
+type shadowResourceProviderApply struct {
+ State *InstanceState
+ Diff *InstanceDiff
+ Result *InstanceState
+ ResultErr error
+}
+
+type shadowResourceProviderDiff struct {
+ State *InstanceState
+ Desired *ResourceConfig
+ Result *InstanceDiff
+ ResultErr error
+}
+
+type shadowResourceProviderRefresh struct {
+ State *InstanceState
+ Result *InstanceState
+ ResultErr error
+}
+
+type shadowResourceProviderValidateDataSourceWrapper struct {
+ sync.RWMutex
+
+ Calls []*shadowResourceProviderValidateDataSource
+}
+
+type shadowResourceProviderValidateDataSource struct {
+ Config *ResourceConfig
+ Warns []string
+ Errors []error
+}
+
+type shadowResourceProviderReadDataDiff struct {
+ Desired *ResourceConfig
+ Result *InstanceDiff
+ ResultErr error
+}
+
+type shadowResourceProviderReadDataApply struct {
+ Diff *InstanceDiff
+ Result *InstanceState
+ ResultErr error
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go
new file mode 100644
index 00000000..60a49088
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go
@@ -0,0 +1,282 @@
+package terraform
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/helper/shadow"
+)
+
+// shadowResourceProvisioner implements ResourceProvisioner for the shadow
+// eval context defined in eval_context_shadow.go.
+//
+// This is used to verify behavior with a real provisioner. This shouldn't
+// be used directly.
+type shadowResourceProvisioner interface {
+ ResourceProvisioner
+ Shadow
+}
+
+// newShadowResourceProvisioner creates a new shadowed ResourceProvisioner.
+func newShadowResourceProvisioner(
+ p ResourceProvisioner) (ResourceProvisioner, shadowResourceProvisioner) {
+ // Create the shared data
+ shared := shadowResourceProvisionerShared{
+ Validate: shadow.ComparedValue{
+ Func: shadowResourceProvisionerValidateCompare,
+ },
+ }
+
+ // Create the real provisioner that does actual work
+ real := &shadowResourceProvisionerReal{
+ ResourceProvisioner: p,
+ Shared: &shared,
+ }
+
+ // Create the shadow that watches the real value
+ shadow := &shadowResourceProvisionerShadow{
+ Shared: &shared,
+ }
+
+ return real, shadow
+}
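+
+// Note (hedged): unlike the provider shadow, Validate here is keyed by
+// the *ResourceConfig itself via shadow.ComparedValue, so the shadow
+// looks results up by config equality rather than by call order:
+//
+//     real, shadow := newShadowResourceProvisioner(underlying)
+//     _, _ = real.Validate(cfg)   // records, keyed by cfg
+//     _, _ = shadow.Validate(cfg) // finds the record via Equal(cfg)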
+
+// shadowResourceProvisionerReal is the real resource provisioner. Function calls
+// to this will perform real work. This records the parameters and return
+// values and call order for the shadow to reproduce.
+type shadowResourceProvisionerReal struct {
+ ResourceProvisioner
+
+ Shared *shadowResourceProvisionerShared
+}
+
+func (p *shadowResourceProvisionerReal) Close() error {
+ var result error
+ if c, ok := p.ResourceProvisioner.(ResourceProvisionerCloser); ok {
+ result = c.Close()
+ }
+
+ p.Shared.CloseErr.SetValue(result)
+ return result
+}
+
+func (p *shadowResourceProvisionerReal) Validate(c *ResourceConfig) ([]string, []error) {
+ warns, errs := p.ResourceProvisioner.Validate(c)
+ p.Shared.Validate.SetValue(&shadowResourceProvisionerValidate{
+ Config: c,
+ ResultWarn: warns,
+ ResultErr: errs,
+ })
+
+ return warns, errs
+}
+
+func (p *shadowResourceProvisionerReal) Apply(
+ output UIOutput, s *InstanceState, c *ResourceConfig) error {
+ err := p.ResourceProvisioner.Apply(output, s, c)
+
+ // Write the result, grab a lock for writing. This should never
+ // block long since the operations below don't block.
+ p.Shared.ApplyLock.Lock()
+ defer p.Shared.ApplyLock.Unlock()
+
+ key := s.ID
+ raw, ok := p.Shared.Apply.ValueOk(key)
+ if !ok {
+ // Set up a new value
+ raw = &shadow.ComparedValue{
+ Func: shadowResourceProvisionerApplyCompare,
+ }
+
+ // Set it
+ p.Shared.Apply.SetValue(key, raw)
+ }
+
+ compareVal, ok := raw.(*shadow.ComparedValue)
+ if !ok {
+ // Just log and return so that we don't cause any side effects
+ // on the real side.
+ log.Printf("[ERROR] unknown value in 'apply': %#v", raw)
+ return err
+ }
+
+ // Write the resulting value
+ compareVal.SetValue(&shadowResourceProvisionerApply{
+ Config: c,
+ ResultErr: err,
+ })
+
+ return err
+}
+
+func (p *shadowResourceProvisionerReal) Stop() error {
+ return p.ResourceProvisioner.Stop()
+}
+
+// shadowResourceProvisionerShadow is the shadow resource provisioner. Function
+// calls never affect real resources. This is paired with the "real" side
+// which must be called properly to enable recording.
+type shadowResourceProvisionerShadow struct {
+ Shared *shadowResourceProvisionerShared
+
+ Error error // Error is the list of errors from the shadow
+ ErrorLock sync.Mutex
+}
+
+type shadowResourceProvisionerShared struct {
+ // NOTE: Anytime a value is added here, be sure to add it to
+ // the Close() method so that it is closed.
+
+ CloseErr shadow.Value
+ Validate shadow.ComparedValue
+ Apply shadow.KeyedValue
+ ApplyLock sync.Mutex // For writing only
+}
+
+func (p *shadowResourceProvisionerShared) Close() error {
+ closers := []io.Closer{
+ &p.CloseErr,
+ }
+
+ for _, c := range closers {
+ // This should never happen, but we don't panic because a panic
+ // could affect the real behavior of Terraform and a shadow should
+ // never be able to do that.
+ if err := c.Close(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (p *shadowResourceProvisionerShadow) CloseShadow() error {
+ err := p.Shared.Close()
+ if err != nil {
+ err = fmt.Errorf("close error: %s", err)
+ }
+
+ return err
+}
+
+func (p *shadowResourceProvisionerShadow) ShadowError() error {
+ return p.Error
+}
+
+func (p *shadowResourceProvisionerShadow) Close() error {
+ v := p.Shared.CloseErr.Value()
+ if v == nil {
+ return nil
+ }
+
+ return v.(error)
+}
+
+func (p *shadowResourceProvisionerShadow) Validate(c *ResourceConfig) ([]string, []error) {
+ // Get the result of the validate call
+ raw := p.Shared.Validate.Value(c)
+ if raw == nil {
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProvisionerValidate)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'validate' shadow value: %#v", raw))
+ return nil, nil
+ }
+
+ // We don't need to compare configurations because we key on the
+ // configuration, so just return right away.
+ return result.ResultWarn, result.ResultErr
+}
+
+func (p *shadowResourceProvisionerShadow) Apply(
+ output UIOutput, s *InstanceState, c *ResourceConfig) error {
+ // Get the value based on the key
+ key := s.ID
+ raw := p.Shared.Apply.Value(key)
+ if raw == nil {
+ return nil
+ }
+
+ compareVal, ok := raw.(*shadow.ComparedValue)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'apply' shadow value: %#v", raw))
+ return nil
+ }
+
+ // With the compared value, we compare against our config
+ raw = compareVal.Value(c)
+ if raw == nil {
+ return nil
+ }
+
+ result, ok := raw.(*shadowResourceProvisionerApply)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'apply' shadow value: %#v", raw))
+ return nil
+ }
+
+ return result.ResultErr
+}
+
+func (p *shadowResourceProvisionerShadow) Stop() error {
+ // For the shadow, we always just return nil since a Stop indicates
+ // that we were interrupted and shadows are disabled during interrupts
+ // anyway.
+ return nil
+}
+
+// The structs for the various function calls are put below. These structs
+// are used to carry call information across the real/shadow boundaries.
+
+type shadowResourceProvisionerValidate struct {
+ Config *ResourceConfig
+ ResultWarn []string
+ ResultErr []error
+}
+
+type shadowResourceProvisionerApply struct {
+ Config *ResourceConfig
+ ResultErr error
+}
+
+func shadowResourceProvisionerValidateCompare(k, v interface{}) bool {
+ c, ok := k.(*ResourceConfig)
+ if !ok {
+ return false
+ }
+
+ result, ok := v.(*shadowResourceProvisionerValidate)
+ if !ok {
+ return false
+ }
+
+ return c.Equal(result.Config)
+}
+
+func shadowResourceProvisionerApplyCompare(k, v interface{}) bool {
+ c, ok := k.(*ResourceConfig)
+ if !ok {
+ return false
+ }
+
+ result, ok := v.(*shadowResourceProvisionerApply)
+ if !ok {
+ return false
+ }
+
+ return c.Equal(result.Config)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go
new file mode 100644
index 00000000..074b6824
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state.go
@@ -0,0 +1,2118 @@
+package terraform
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/go-version"
+ "github.com/hashicorp/terraform/config"
+ "github.com/mitchellh/copystructure"
+ "github.com/satori/go.uuid"
+)
+
+const (
+ // StateVersion is the current version for our state file
+ StateVersion = 3
+)
+
+// rootModulePath is the path of the root module
+var rootModulePath = []string{"root"}
+
+// normalizeModulePath takes a raw module path and returns a path that
+// has the rootModulePath prepended to it. If I could go back in time I
+// would've never had a rootModulePath (empty path would be root). We can
+// still fix this but that's a big refactor that doesn't make sense for
+// my branch. Instead, this function normalizes paths.
+func normalizeModulePath(p []string) []string {
+ k := len(rootModulePath)
+
+ // If we already have a root module prefix, we're done
+ if len(p) >= len(rootModulePath) {
+ if reflect.DeepEqual(p[:k], rootModulePath) {
+ return p
+ }
+ }
+
+ // None? Prefix it
+ result := make([]string, len(rootModulePath)+len(p))
+ copy(result, rootModulePath)
+ copy(result[k:], p)
+ return result
+}
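+
+// For example (derived from the logic above):
+//
+//     normalizeModulePath(nil)                       // => ["root"]
+//     normalizeModulePath([]string{"child"})         // => ["root", "child"]
+//     normalizeModulePath([]string{"root", "child"}) // => unchanged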
+
+// State is a snapshot of the world that Terraform uses to keep track
+// of which real-world resources it is actually managing.
+type State struct {
+ // Version is the state file protocol version.
+ Version int `json:"version"`
+
+ // TFVersion is the version of Terraform that wrote this state.
+ TFVersion string `json:"terraform_version,omitempty"`
+
+ // Serial is incremented on any operation that modifies
+ // the State file. It is used to detect potentially conflicting
+ // updates.
+ Serial int64 `json:"serial"`
+
+ // Lineage is set when a new, blank state is created and then
+ // never updated. This allows us to determine whether the serials
+ // of two states can be meaningfully compared.
+ // Apart from the guarantee that collisions between two lineages
+ // are very unlikely, this value is opaque and external callers
+ // should only compare lineage strings byte-for-byte for equality.
+ Lineage string `json:"lineage"`
+
+ // Remote is used to track the metadata required to
+ // pull and push state files from a remote storage endpoint.
+ Remote *RemoteState `json:"remote,omitempty"`
+
+ // Backend tracks the configuration for the backend in use with
+ // this state. This is used to track any changes in the backend
+ // configuration.
+ Backend *BackendState `json:"backend,omitempty"`
+
+ // Modules contains all the modules in a breadth-first order
+ Modules []*ModuleState `json:"modules"`
+
+ mu sync.Mutex
+}
+
+func (s *State) Lock() { s.mu.Lock() }
+func (s *State) Unlock() { s.mu.Unlock() }
+
+// NewState is used to initialize a blank state
+func NewState() *State {
+ s := &State{}
+ s.init()
+ return s
+}
+
+// Children returns the ModuleStates that are direct children of
+// the given path. If the path is "root", for example, then children
+// returned might be "root.child", but not "root.child.grandchild".
+func (s *State) Children(path []string) []*ModuleState {
+ s.Lock()
+ defer s.Unlock()
+ // TODO: test
+
+ return s.children(path)
+}
+
+func (s *State) children(path []string) []*ModuleState {
+ result := make([]*ModuleState, 0)
+ for _, m := range s.Modules {
+ if m == nil {
+ continue
+ }
+
+ if len(m.Path) != len(path)+1 {
+ continue
+ }
+ if !reflect.DeepEqual(path, m.Path[:len(path)]) {
+ continue
+ }
+
+ result = append(result, m)
+ }
+
+ return result
+}
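+
+// For example (derived): with modules at ["root"], ["root", "a"], and
+// ["root", "a", "b"], children([]string{"root"}) returns only the
+// module at ["root", "a"].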
+
+// AddModule adds the module with the given path to the state.
+//
+// This should be the preferred method to add module states since it
+// allows us to optimize lookups later as well as control sorting.
+func (s *State) AddModule(path []string) *ModuleState {
+ s.Lock()
+ defer s.Unlock()
+
+ return s.addModule(path)
+}
+
+func (s *State) addModule(path []string) *ModuleState {
+ // check if the module exists first
+ m := s.moduleByPath(path)
+ if m != nil {
+ return m
+ }
+
+ m = &ModuleState{Path: path}
+ m.init()
+ s.Modules = append(s.Modules, m)
+ s.sort()
+ return m
+}
+
+// ModuleByPath is used to lookup the module state for the given path.
+// This should be the preferred lookup mechanism as it allows for future
+// lookup optimizations.
+func (s *State) ModuleByPath(path []string) *ModuleState {
+ if s == nil {
+ return nil
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ return s.moduleByPath(path)
+}
+
+func (s *State) moduleByPath(path []string) *ModuleState {
+ for _, mod := range s.Modules {
+ if mod == nil {
+ continue
+ }
+ if mod.Path == nil {
+ panic("missing module path")
+ }
+ if reflect.DeepEqual(mod.Path, path) {
+ return mod
+ }
+ }
+ return nil
+}
+
+// ModuleOrphans returns all the module orphans in this state by
+// returning their full paths. These paths can be used with ModuleByPath
+// to return the actual state.
+func (s *State) ModuleOrphans(path []string, c *config.Config) [][]string {
+ s.Lock()
+ defer s.Unlock()
+
+ return s.moduleOrphans(path, c)
+
+}
+
+func (s *State) moduleOrphans(path []string, c *config.Config) [][]string {
+ // direct keeps track of what direct children we have both in our config
+ // and in our state. childrenKeys keeps track of what isn't an orphan.
+ direct := make(map[string]struct{})
+ childrenKeys := make(map[string]struct{})
+ if c != nil {
+ for _, m := range c.Modules {
+ childrenKeys[m.Name] = struct{}{}
+ direct[m.Name] = struct{}{}
+ }
+ }
+
+ // Go over the direct children and find any that aren't in our keys.
+ var orphans [][]string
+ for _, m := range s.children(path) {
+ key := m.Path[len(m.Path)-1]
+
+ // Record that we found this key as a direct child. We use this
+ // later to find orphan nested modules.
+ direct[key] = struct{}{}
+
+ // If we have a direct child still in our config, it is not an orphan
+ if _, ok := childrenKeys[key]; ok {
+ continue
+ }
+
+ orphans = append(orphans, m.Path)
+ }
+
+ // Find the orphans that are nested...
+ for _, m := range s.Modules {
+ if m == nil {
+ continue
+ }
+
+ // We only want modules that are at least grandchildren
+ if len(m.Path) < len(path)+2 {
+ continue
+ }
+
+ // If it isn't part of our tree, continue
+ if !reflect.DeepEqual(path, m.Path[:len(path)]) {
+ continue
+ }
+
+ // If we have the direct child, then just skip it.
+ key := m.Path[len(path)]
+ if _, ok := direct[key]; ok {
+ continue
+ }
+
+ orphanPath := m.Path[:len(path)+1]
+
+ // Don't double-add if we've already added this orphan (which can happen if
+ // there are multiple nested sub-modules that get orphaned together).
+ alreadyAdded := false
+ for _, o := range orphans {
+ if reflect.DeepEqual(o, orphanPath) {
+ alreadyAdded = true
+ break
+ }
+ }
+ if alreadyAdded {
+ continue
+ }
+
+ // Add this orphan
+ orphans = append(orphans, orphanPath)
+ }
+
+ return orphans
+}
+
+// Empty returns true if the state is empty.
+func (s *State) Empty() bool {
+ if s == nil {
+ return true
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ return len(s.Modules) == 0
+}
+
+// HasResources returns true if the state contains any resources.
+//
+// This is similar to !s.Empty, but returns false also in the case where the
+// state has modules but all of them are devoid of resources.
+func (s *State) HasResources() bool {
+ if s.Empty() {
+ return false
+ }
+
+ for _, mod := range s.Modules {
+ if len(mod.Resources) > 0 {
+ return true
+ }
+ }
+
+ return false
+}
+
+// IsRemote returns true if State represents a state that exists and is
+// remote.
+func (s *State) IsRemote() bool {
+ if s == nil {
+ return false
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Remote == nil {
+ return false
+ }
+ if s.Remote.Type == "" {
+ return false
+ }
+
+ return true
+}
+
+// Validate validates the integrity of this state file.
+//
+// Certain properties of the statefile are expected by Terraform in order
+// to behave properly. The core of Terraform will assume that once it
+// receives a State structure that it has been validated. This validation
+// check should be called to ensure that.
+//
+// If this returns an error, then the user should be notified. The error
+// response will include detailed information on the nature of the error.
+func (s *State) Validate() error {
+ s.Lock()
+ defer s.Unlock()
+
+ var result error
+
+ // !!!! FOR DEVELOPERS !!!!
+ //
+ // Any errors returned from this Validate function will BLOCK TERRAFORM
+ // from loading a state file. Therefore, this should only contain checks
+ // that are only resolvable through manual intervention.
+ //
+ // !!!! FOR DEVELOPERS !!!!
+
+ // Make sure there are no duplicate module states. We open a new
+ // block here so we can use basic variable names and future validations
+ // can do the same.
+ {
+ found := make(map[string]struct{})
+ for _, ms := range s.Modules {
+ if ms == nil {
+ continue
+ }
+
+ key := strings.Join(ms.Path, ".")
+ if _, ok := found[key]; ok {
+ result = multierror.Append(result, fmt.Errorf(
+ strings.TrimSpace(stateValidateErrMultiModule), key))
+ continue
+ }
+
+ found[key] = struct{}{}
+ }
+ }
+
+ return result
+}
+
+// Remove removes the item in the state at the given address, returning
+// any errors that may have occurred.
+//
+// If the address references a module state or resource, it will delete
+// all children as well. To check what will be deleted, use a StateFilter
+// first.
+func (s *State) Remove(addr ...string) error {
+ s.Lock()
+ defer s.Unlock()
+
+ // Filter out what we need to delete
+ filter := &StateFilter{State: s}
+ results, err := filter.Filter(addr...)
+ if err != nil {
+ return err
+ }
+
+ // If we have no results, just exit early, we're not going to do anything.
+ // While what happens below is fairly fast, this is an important early
+ // exit since the prune below might modify the state more and we don't
+ // want to modify the state if we don't have to.
+ if len(results) == 0 {
+ return nil
+ }
+
+ // Go through each result and grab what we need
+ removed := make(map[interface{}]struct{})
+ for _, r := range results {
+ // Convert the path to our own type
+ path := append([]string{"root"}, r.Path...)
+
+ // If we removed this already, then ignore
+ if _, ok := removed[r.Value]; ok {
+ continue
+ }
+
+ // If we removed the parent already, then ignore
+ if r.Parent != nil {
+ if _, ok := removed[r.Parent.Value]; ok {
+ continue
+ }
+ }
+
+ // Add this to the removed list
+ removed[r.Value] = struct{}{}
+
+ switch v := r.Value.(type) {
+ case *ModuleState:
+ s.removeModule(path, v)
+ case *ResourceState:
+ s.removeResource(path, v)
+ case *InstanceState:
+ s.removeInstance(path, r.Parent.Value.(*ResourceState), v)
+ default:
+ return fmt.Errorf("unknown type to delete: %T", r.Value)
+ }
+ }
+
+ // Prune since the removal functions often do the bare minimum to
+ // remove a thing and may leave around dangling empty modules, resources,
+ // etc. Prune will clean that all up.
+ s.prune()
+
+ return nil
+}
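+
+// A usage sketch (hypothetical addresses; the accepted syntax is
+// whatever StateFilter accepts):
+//
+//     _ = s.Remove("aws_instance.web") // a resource and its instances
+//     _ = s.Remove("module.child")     // a module and all its children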
+
+func (s *State) removeModule(path []string, v *ModuleState) {
+ for i, m := range s.Modules {
+ if m == v {
+ s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil
+ return
+ }
+ }
+}
+
+func (s *State) removeResource(path []string, v *ResourceState) {
+ // Get the module this resource lives in. If it doesn't exist, we're done.
+ mod := s.moduleByPath(path)
+ if mod == nil {
+ return
+ }
+
+ // Find this resource. This is an O(N) lookup; if we had the key it
+ // could be O(1), but even with thousands of resources this shouldn't
+ // matter right now. We can easily improve performance here when the
+ // time comes.
+ for k, r := range mod.Resources {
+ if r == v {
+ // Found it
+ delete(mod.Resources, k)
+ return
+ }
+ }
+}
+
+func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) {
+ // Go through the resource and find the instance that matches this
+ // (if any) and remove it.
+
+ // Check primary
+ if r.Primary == v {
+ r.Primary = nil
+ return
+ }
+
+ // Check lists
+ lists := [][]*InstanceState{r.Deposed}
+ for _, is := range lists {
+ for i, instance := range is {
+ if instance == v {
+ // Found it, remove it
+ is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil
+
+ // Done
+ return
+ }
+ }
+ }
+}
+
+// RootModule returns the ModuleState for the root module
+func (s *State) RootModule() *ModuleState {
+ root := s.ModuleByPath(rootModulePath)
+ if root == nil {
+ panic("missing root module")
+ }
+ return root
+}
+
+// Equal tests if one state is equal to another.
+func (s *State) Equal(other *State) bool {
+ // If one is nil, we do a direct check
+ if s == nil || other == nil {
+ return s == other
+ }
+
+ s.Lock()
+ defer s.Unlock()
+ return s.equal(other)
+}
+
+func (s *State) equal(other *State) bool {
+ if s == nil || other == nil {
+ return s == other
+ }
+
+ // If the versions are different, they're certainly not equal
+ if s.Version != other.Version {
+ return false
+ }
+
+ // If any of the modules are not equal, then this state isn't equal
+ if len(s.Modules) != len(other.Modules) {
+ return false
+ }
+ for _, m := range s.Modules {
+ // This isn't very optimal currently but works.
+ otherM := other.moduleByPath(m.Path)
+ if otherM == nil {
+ return false
+ }
+
+ // If they're not equal, then we're not equal!
+ if !m.Equal(otherM) {
+ return false
+ }
+ }
+
+ return true
+}
+
+type StateAgeComparison int
+
+const (
+ StateAgeEqual StateAgeComparison = 0
+ StateAgeReceiverNewer StateAgeComparison = 1
+ StateAgeReceiverOlder StateAgeComparison = -1
+)
+
+// CompareAges compares one state with another for which is "older".
+//
+// This is a simple check using the state's serial, and is thus only as
+// reliable as the serial itself. In the normal case, only one state
+// exists for a given combination of lineage/serial, but Terraform
+// does not guarantee this and so the result of this method should be
+// used with care.
+//
+// Returns an integer that is negative if the receiver is older than
+// the argument, positive if the converse, and zero if they are equal.
+// An error is returned if the two states are not of the same lineage,
+// in which case the integer returned has no meaning.
+func (s *State) CompareAges(other *State) (StateAgeComparison, error) {
+ // nil states are "older" than actual states
+ switch {
+ case s != nil && other == nil:
+ return StateAgeReceiverNewer, nil
+ case s == nil && other != nil:
+ return StateAgeReceiverOlder, nil
+ case s == nil && other == nil:
+ return StateAgeEqual, nil
+ }
+
+ if !s.SameLineage(other) {
+ return StateAgeEqual, fmt.Errorf(
+ "can't compare two states of differing lineage",
+ )
+ }
+
+ s.Lock()
+ defer s.Unlock()
+
+ switch {
+ case s.Serial < other.Serial:
+ return StateAgeReceiverOlder, nil
+ case s.Serial > other.Serial:
+ return StateAgeReceiverNewer, nil
+ default:
+ return StateAgeEqual, nil
+ }
+}
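+
+// Usage sketch (derived from the constants above):
+//
+//     switch cmp, err := a.CompareAges(b); {
+//     case err != nil:                   // differing lineage
+//     case cmp == StateAgeReceiverOlder: // a.Serial < b.Serial
+//     case cmp == StateAgeReceiverNewer: // a.Serial > b.Serial
+//     }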
+
+// SameLineage returns true only if the state given as an argument belongs
+// to the same "lineage" of states as the receiver.
+func (s *State) SameLineage(other *State) bool {
+ s.Lock()
+ defer s.Unlock()
+
+ // If one of the states has no lineage then it is assumed to predate
+ // this concept, and so we'll accept it as belonging to any lineage
+ // so that a lineage string can be assigned to newer versions
+ // without breaking compatibility with older versions.
+ if s.Lineage == "" || other.Lineage == "" {
+ return true
+ }
+
+ return s.Lineage == other.Lineage
+}
+
+// DeepCopy performs a deep copy of the state structure and returns
+// a new structure.
+func (s *State) DeepCopy() *State {
+ copy, err := copystructure.Config{Lock: true}.Copy(s)
+ if err != nil {
+ panic(err)
+ }
+
+ return copy.(*State)
+}
+
+// IncrementSerialMaybe increments the serial number of this state
+// if it is different from the other state.
+func (s *State) IncrementSerialMaybe(other *State) {
+ if s == nil {
+ return
+ }
+ if other == nil {
+ return
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Serial > other.Serial {
+ return
+ }
+ if other.TFVersion != s.TFVersion || !s.equal(other) {
+ if other.Serial > s.Serial {
+ s.Serial = other.Serial
+ }
+
+ s.Serial++
+ }
+}
+
+// FromFutureTerraform checks if this state was written by a Terraform
+// version from the future.
+func (s *State) FromFutureTerraform() bool {
+ s.Lock()
+ defer s.Unlock()
+
+ // No TF version means it is certainly from the past
+ if s.TFVersion == "" {
+ return false
+ }
+
+ v := version.Must(version.NewVersion(s.TFVersion))
+ return SemVersion.LessThan(v)
+}
+
+func (s *State) Init() {
+ s.Lock()
+ defer s.Unlock()
+ s.init()
+}
+
+func (s *State) init() {
+ if s.Version == 0 {
+ s.Version = StateVersion
+ }
+ if s.moduleByPath(rootModulePath) == nil {
+ s.addModule(rootModulePath)
+ }
+ s.ensureHasLineage()
+
+ for _, mod := range s.Modules {
+ if mod != nil {
+ mod.init()
+ }
+ }
+
+ if s.Remote != nil {
+ s.Remote.init()
+ }
+
+}
+
+func (s *State) EnsureHasLineage() {
+ s.Lock()
+ defer s.Unlock()
+
+ s.ensureHasLineage()
+}
+
+func (s *State) ensureHasLineage() {
+ if s.Lineage == "" {
+ s.Lineage = uuid.NewV4().String()
+ log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage)
+ } else {
+ log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage)
+ }
+}
+
+// AddModuleState inserts this module state, overriding any existing
+// ModuleState with the same path.
+func (s *State) AddModuleState(mod *ModuleState) {
+ mod.init()
+ s.Lock()
+ defer s.Unlock()
+
+ s.addModuleState(mod)
+}
+
+func (s *State) addModuleState(mod *ModuleState) {
+ for i, m := range s.Modules {
+ if reflect.DeepEqual(m.Path, mod.Path) {
+ s.Modules[i] = mod
+ return
+ }
+ }
+
+ s.Modules = append(s.Modules, mod)
+ s.sort()
+}
+
+// prune is used to remove any resources that are no longer required
+func (s *State) prune() {
+ if s == nil {
+ return
+ }
+
+ // Filter out empty modules.
+// A module is always assumed to have a path, and its length isn't always
+ // bounds checked later on. Modules may be "emptied" during destroy, but we
+ // never want to store those in the state.
+ for i := 0; i < len(s.Modules); i++ {
+ if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 {
+ s.Modules = append(s.Modules[:i], s.Modules[i+1:]...)
+ i--
+ }
+ }
+
+ for _, mod := range s.Modules {
+ mod.prune()
+ }
+ if s.Remote != nil && s.Remote.Empty() {
+ s.Remote = nil
+ }
+}
+
+// sort sorts the modules
+func (s *State) sort() {
+ sort.Sort(moduleStateSort(s.Modules))
+
+ // Allow modules to be sorted
+ for _, m := range s.Modules {
+ if m != nil {
+ m.sort()
+ }
+ }
+}
+
+func (s *State) String() string {
+ if s == nil {
+ return "<nil>"
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ var buf bytes.Buffer
+ for _, m := range s.Modules {
+ mStr := m.String()
+
+ // If we're the root module, we just write the output directly.
+ if reflect.DeepEqual(m.Path, rootModulePath) {
+ buf.WriteString(mStr + "\n")
+ continue
+ }
+
+ buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], ".")))
+
+ s := bufio.NewScanner(strings.NewReader(mStr))
+ for s.Scan() {
+ text := s.Text()
+ if text != "" {
+ text = " " + text
+ }
+
+ buf.WriteString(fmt.Sprintf("%s\n", text))
+ }
+ }
+
+ return strings.TrimSpace(buf.String())
+}
+
+// BackendState stores the configuration to connect to a remote backend.
+type BackendState struct {
+ Type string `json:"type"` // Backend type
+ Config map[string]interface{} `json:"config"` // Backend raw config
+
+ // Hash is the hash code to uniquely identify the original source
+ // configuration. We use this to detect when there is a change in
+ // configuration even when "type" isn't changed.
+ Hash uint64 `json:"hash"`
+}
+
+// Empty returns true if BackendState has no state.
+func (s *BackendState) Empty() bool {
+ return s == nil || s.Type == ""
+}
+
+// Rehash returns a unique content hash for this backend's configuration
+// as a uint64 value.
+// The Hash stored in the backend state needs to match the config itself, but
+// we need to compare the backend config after it has been combined with all
+// options.
+// This function must match the implementation used by config.Backend.
+func (s *BackendState) Rehash() uint64 {
+ if s == nil {
+ return 0
+ }
+
+ cfg := config.Backend{
+ Type: s.Type,
+ RawConfig: &config.RawConfig{
+ Raw: s.Config,
+ },
+ }
+
+ return cfg.Rehash()
+}
+
+// RemoteState is used to track the information about a remote
+// state store that we push/pull state to.
+type RemoteState struct {
+ // Type controls the client we use for the remote state
+ Type string `json:"type"`
+
+ // Config is used to store arbitrary configuration that
+ // is type specific
+ Config map[string]string `json:"config"`
+
+ mu sync.Mutex
+}
+
+func (s *RemoteState) Lock() { s.mu.Lock() }
+func (s *RemoteState) Unlock() { s.mu.Unlock() }
+
+func (r *RemoteState) init() {
+ r.Lock()
+ defer r.Unlock()
+
+ if r.Config == nil {
+ r.Config = make(map[string]string)
+ }
+}
+
+func (r *RemoteState) deepcopy() *RemoteState {
+ r.Lock()
+ defer r.Unlock()
+
+ confCopy := make(map[string]string, len(r.Config))
+ for k, v := range r.Config {
+ confCopy[k] = v
+ }
+ return &RemoteState{
+ Type: r.Type,
+ Config: confCopy,
+ }
+}
+
+func (r *RemoteState) Empty() bool {
+ if r == nil {
+ return true
+ }
+ r.Lock()
+ defer r.Unlock()
+
+ return r.Type == ""
+}
+
+func (r *RemoteState) Equals(other *RemoteState) bool {
+ r.Lock()
+ defer r.Unlock()
+
+ if r.Type != other.Type {
+ return false
+ }
+ if len(r.Config) != len(other.Config) {
+ return false
+ }
+ for k, v := range r.Config {
+ if other.Config[k] != v {
+ return false
+ }
+ }
+ return true
+}
+
+// OutputState is used to track the state relevant to a single output.
+type OutputState struct {
+ // Sensitive describes whether the output is considered sensitive,
+ // which may lead to masking the value on screen in some cases.
+ Sensitive bool `json:"sensitive"`
+ // Type describes the structure of Value. Valid values are "string",
+ // "map" and "list"
+ Type string `json:"type"`
+ // Value contains the value of the output, in the structure described
+ // by the Type field.
+ Value interface{} `json:"value"`
+
+ mu sync.Mutex
+}
+
+func (s *OutputState) Lock() { s.mu.Lock() }
+func (s *OutputState) Unlock() { s.mu.Unlock() }
+
+func (s *OutputState) String() string {
+ return fmt.Sprintf("%#v", s.Value)
+}
+
+// Equal compares two OutputState structures for equality. nil values are
+// considered equal.
+func (s *OutputState) Equal(other *OutputState) bool {
+ if s == nil && other == nil {
+ return true
+ }
+
+ if s == nil || other == nil {
+ return false
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Type != other.Type {
+ return false
+ }
+
+ if s.Sensitive != other.Sensitive {
+ return false
+ }
+
+ if !reflect.DeepEqual(s.Value, other.Value) {
+ return false
+ }
+
+ return true
+}
+
+func (s *OutputState) deepcopy() *OutputState {
+ if s == nil {
+ return nil
+ }
+
+ stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
+ if err != nil {
+ panic(fmt.Errorf("Error copying output value: %s", err))
+ }
+
+ return stateCopy.(*OutputState)
+}
+
+// ModuleState is used to track all the state relevant to a single
+// module. Previous to Terraform 0.3, all state belonged to the "root"
+// module.
+type ModuleState struct {
+ // Path is the import path from the root module. Module imports are
+ // always disjoint, so the path represents a module tree.
+ Path []string `json:"path"`
+
+ // Outputs declared by the module and maintained for each module
+ // even though only the root module technically needs to be kept.
+ // This allows operators to inspect values at the boundaries.
+ Outputs map[string]*OutputState `json:"outputs"`
+
+ // Resources is a mapping of the logically named resource to
+ // the state of the resource. Each resource may actually have
+ // N instances underneath, although a user only needs to think
+ // about the 1:1 case.
+ Resources map[string]*ResourceState `json:"resources"`
+
+ // Dependencies are a list of things that this module relies on
+ // existing to remain intact. For example: a module may depend
+ // on a VPC ID given by an aws_vpc resource.
+ //
+ // Terraform uses this information to build valid destruction
+ // orders and to warn the user if they're destroying a module that
+ // another resource depends on.
+ //
+ // Things can be put into this list that may not be managed by
+ // Terraform. If Terraform doesn't find a matching ID in the
+ // overall state, then it assumes it isn't managed and doesn't
+ // worry about it.
+ Dependencies []string `json:"depends_on"`
+
+ mu sync.Mutex
+}
+
+func (s *ModuleState) Lock() { s.mu.Lock() }
+func (s *ModuleState) Unlock() { s.mu.Unlock() }
+
+// Equal tests whether one module state is equal to another.
+func (m *ModuleState) Equal(other *ModuleState) bool {
+ m.Lock()
+ defer m.Unlock()
+
+ // Paths must be equal
+ if !reflect.DeepEqual(m.Path, other.Path) {
+ return false
+ }
+
+ // Outputs must be equal
+ if len(m.Outputs) != len(other.Outputs) {
+ return false
+ }
+ for k, v := range m.Outputs {
+ if !other.Outputs[k].Equal(v) {
+ return false
+ }
+ }
+
+ // Dependencies must be equal. This sorts these in place but
+ // this shouldn't cause any problems.
+ sort.Strings(m.Dependencies)
+ sort.Strings(other.Dependencies)
+ if len(m.Dependencies) != len(other.Dependencies) {
+ return false
+ }
+ for i, d := range m.Dependencies {
+ if other.Dependencies[i] != d {
+ return false
+ }
+ }
+
+ // Resources must be equal
+ if len(m.Resources) != len(other.Resources) {
+ return false
+ }
+ for k, r := range m.Resources {
+ otherR, ok := other.Resources[k]
+ if !ok {
+ return false
+ }
+
+ if !r.Equal(otherR) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// IsRoot reports whether this module state is for the root module.
+func (m *ModuleState) IsRoot() bool {
+ m.Lock()
+ defer m.Unlock()
+ return reflect.DeepEqual(m.Path, rootModulePath)
+}
+
+// IsDescendent returns true if other is a descendent of this module.
+func (m *ModuleState) IsDescendent(other *ModuleState) bool {
+ m.Lock()
+ defer m.Unlock()
+
+ i := len(m.Path)
+ return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path)
+}
+
+// Orphans returns a list of keys of resources that are in the State
+// but aren't present in the configuration itself. Hence, these keys
+// represent the state of resources that are orphans.
+func (m *ModuleState) Orphans(c *config.Config) []string {
+ m.Lock()
+ defer m.Unlock()
+
+ keys := make(map[string]struct{})
+ for k := range m.Resources {
+ keys[k] = struct{}{}
+ }
+
+ if c != nil {
+ for _, r := range c.Resources {
+ delete(keys, r.Id())
+
+ for k := range keys {
+ if strings.HasPrefix(k, r.Id()+".") {
+ delete(keys, k)
+ }
+ }
+ }
+ }
+
+ result := make([]string, 0, len(keys))
+ for k := range keys {
+ result = append(result, k)
+ }
+
+ return result
+}
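Orphans is a plain set difference over map keys, with a prefix rule so that counted instance keys ("id.N") follow their base resource. A minimal self-contained sketch of the same idea, using hypothetical resource IDs:

    package main

    import (
        "fmt"
        "strings"
    )

    // orphanKeys returns the state keys with no matching configured ID,
    // treating "id.N" instance keys as belonging to "id".
    func orphanKeys(stateKeys, configuredIDs []string) []string {
        keys := make(map[string]struct{}, len(stateKeys))
        for _, k := range stateKeys {
            keys[k] = struct{}{}
        }
        for _, id := range configuredIDs {
            delete(keys, id)
            for k := range keys {
                if strings.HasPrefix(k, id+".") {
                    delete(keys, k)
                }
            }
        }
        out := make([]string, 0, len(keys))
        for k := range keys {
            out = append(out, k)
        }
        return out
    }

    func main() {
        state := []string{"aws_instance.web", "aws_instance.web.1", "aws_instance.old"}
        config := []string{"aws_instance.web"}
        fmt.Println(orphanKeys(state, config)) // [aws_instance.old]
    }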
+
+// View returns a view with the given resource prefix.
+func (m *ModuleState) View(id string) *ModuleState {
+ if m == nil {
+ return m
+ }
+
+ r := m.deepcopy()
+ for k := range r.Resources {
+ if id == k || strings.HasPrefix(k, id+".") {
+ continue
+ }
+
+ delete(r.Resources, k)
+ }
+
+ return r
+}
+
+func (m *ModuleState) init() {
+ m.Lock()
+ defer m.Unlock()
+
+ if m.Path == nil {
+ m.Path = []string{}
+ }
+ if m.Outputs == nil {
+ m.Outputs = make(map[string]*OutputState)
+ }
+ if m.Resources == nil {
+ m.Resources = make(map[string]*ResourceState)
+ }
+
+ if m.Dependencies == nil {
+ m.Dependencies = make([]string, 0)
+ }
+
+ for _, rs := range m.Resources {
+ rs.init()
+ }
+}
+
+func (m *ModuleState) deepcopy() *ModuleState {
+ if m == nil {
+ return nil
+ }
+
+ stateCopy, err := copystructure.Config{Lock: true}.Copy(m)
+ if err != nil {
+ panic(err)
+ }
+
+ return stateCopy.(*ModuleState)
+}
+
+// prune is used to remove any resources that are no longer required
+func (m *ModuleState) prune() {
+ m.Lock()
+ defer m.Unlock()
+
+ for k, v := range m.Resources {
+ if v == nil || ((v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0) {
+ delete(m.Resources, k)
+ continue
+ }
+
+ v.prune()
+ }
+
+ for k, v := range m.Outputs {
+ if v.Value == config.UnknownVariableValue {
+ delete(m.Outputs, k)
+ }
+ }
+
+ m.Dependencies = uniqueStrings(m.Dependencies)
+}
+
+func (m *ModuleState) sort() {
+ for _, v := range m.Resources {
+ v.sort()
+ }
+}
+
+func (m *ModuleState) String() string {
+ m.Lock()
+ defer m.Unlock()
+
+ var buf bytes.Buffer
+
+ if len(m.Resources) == 0 {
+ buf.WriteString("<no state>")
+ }
+
+ names := make([]string, 0, len(m.Resources))
+ for name := range m.Resources {
+ names = append(names, name)
+ }
+
+ sort.Sort(resourceNameSort(names))
+
+ for _, k := range names {
+ rs := m.Resources[k]
+ var id string
+ if rs.Primary != nil {
+ id = rs.Primary.ID
+ }
+ if id == "" {
+ id = "<not created>"
+ }
+
+ taintStr := ""
+ if rs.Primary != nil && rs.Primary.Tainted {
+ taintStr = " (tainted)"
+ }
+
+ deposedStr := ""
+ if len(rs.Deposed) > 0 {
+ deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed))
+ }
+
+ buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr))
+ buf.WriteString(fmt.Sprintf(" ID = %s\n", id))
+ if rs.Provider != "" {
+ buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider))
+ }
+
+ var attributes map[string]string
+ if rs.Primary != nil {
+ attributes = rs.Primary.Attributes
+ }
+ attrKeys := make([]string, 0, len(attributes))
+ for ak := range attributes {
+ if ak == "id" {
+ continue
+ }
+
+ attrKeys = append(attrKeys, ak)
+ }
+
+ sort.Strings(attrKeys)
+
+ for _, ak := range attrKeys {
+ av := attributes[ak]
+ buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av))
+ }
+
+ for idx, t := range rs.Deposed {
+ taintStr := ""
+ if t.Tainted {
+ taintStr = " (tainted)"
+ }
+ buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr))
+ }
+
+ if len(rs.Dependencies) > 0 {
+ buf.WriteString(fmt.Sprintf("\n Dependencies:\n"))
+ for _, dep := range rs.Dependencies {
+ buf.WriteString(fmt.Sprintf(" %s\n", dep))
+ }
+ }
+ }
+
+ if len(m.Outputs) > 0 {
+ buf.WriteString("\nOutputs:\n\n")
+
+ ks := make([]string, 0, len(m.Outputs))
+ for k := range m.Outputs {
+ ks = append(ks, k)
+ }
+
+ sort.Strings(ks)
+
+ for _, k := range ks {
+ v := m.Outputs[k]
+ switch vTyped := v.Value.(type) {
+ case string:
+ buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
+ case []interface{}:
+ buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
+ case map[string]interface{}:
+ var mapKeys []string
+ for key := range vTyped {
+ mapKeys = append(mapKeys, key)
+ }
+ sort.Strings(mapKeys)
+
+ var mapBuf bytes.Buffer
+ mapBuf.WriteString("{")
+ for _, key := range mapKeys {
+ mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key]))
+ }
+ mapBuf.WriteString("}")
+
+ buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String()))
+ }
+ }
+ }
+
+ return buf.String()
+}
+
+// ResourceStateKey is a structured representation of the key used for the
+// ModuleState.Resources mapping
+type ResourceStateKey struct {
+ Name string
+ Type string
+ Mode config.ResourceMode
+ Index int
+}
+
+// Equal determines whether two ResourceStateKeys are the same
+func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool {
+ if rsk == nil || other == nil {
+ return false
+ }
+ if rsk.Mode != other.Mode {
+ return false
+ }
+ if rsk.Type != other.Type {
+ return false
+ }
+ if rsk.Name != other.Name {
+ return false
+ }
+ if rsk.Index != other.Index {
+ return false
+ }
+ return true
+}
+
+func (rsk *ResourceStateKey) String() string {
+ if rsk == nil {
+ return ""
+ }
+ var prefix string
+ switch rsk.Mode {
+ case config.ManagedResourceMode:
+ prefix = ""
+ case config.DataResourceMode:
+ prefix = "data."
+ default:
+ panic(fmt.Errorf("unknown resource mode %s", rsk.Mode))
+ }
+ if rsk.Index == -1 {
+ return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name)
+ }
+ return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index)
+}
+
+// ParseResourceStateKey accepts a key in the format used by
+// ModuleState.Resources and returns the corresponding ResourceStateKey,
+// carrying the resource's mode, type, name and index. In the state, a
+// resource has the format "type.name.index" or "type.name", optionally
+// preceded by "data." for data resources. In the latter case, the index is
+// returned as -1.
+func ParseResourceStateKey(k string) (*ResourceStateKey, error) {
+ parts := strings.Split(k, ".")
+ mode := config.ManagedResourceMode
+ if len(parts) > 0 && parts[0] == "data" {
+ mode = config.DataResourceMode
+ // Don't need the constant "data" prefix for parsing
+ // now that we've figured out the mode.
+ parts = parts[1:]
+ }
+ if len(parts) < 2 || len(parts) > 3 {
+ return nil, fmt.Errorf("Malformed resource state key: %s", k)
+ }
+ rsk := &ResourceStateKey{
+ Mode: mode,
+ Type: parts[0],
+ Name: parts[1],
+ Index: -1,
+ }
+ if len(parts) == 3 {
+ index, err := strconv.Atoi(parts[2])
+ if err != nil {
+ return nil, fmt.Errorf("Malformed resource state key index: %s", k)
+ }
+ rsk.Index = index
+ }
+ return rsk, nil
+}
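A usage sketch for the parser above, assuming the surrounding package; the Index field is -1 when the key carries no count suffix:

    for _, k := range []string{"aws_instance.web", "aws_instance.web.3", "data.aws_ami.ubuntu"} {
        rsk, err := ParseResourceStateKey(k)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("mode=%v type=%s name=%s index=%d\n", rsk.Mode, rsk.Type, rsk.Name, rsk.Index)
    }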
+
+// ResourceState holds the state of a resource that is used so that
+// a provider can find and manage an existing resource as well as for
+// storing attributes that are used to populate variables of child
+// resources.
+//
+// Attributes has attributes about the created resource that are
+// queryable in interpolation: "${type.id.attr}"
+//
+// Extra is just extra data that a provider can return that we store
+// for later, but is not exposed in any way to the user.
+//
+type ResourceState struct {
+ // This is filled in and managed by Terraform, and is the resource
+ // type itself such as "mycloud_instance". If a resource provider sets
+ // this value, it won't be persisted.
+ Type string `json:"type"`
+
+ // Dependencies are a list of things that this resource relies on
+ // existing to remain intact. For example: an AWS instance might
+ // depend on a subnet (which itself might depend on a VPC, and so
+ // on).
+ //
+ // Terraform uses this information to build valid destruction
+ // orders and to warn the user if they're destroying a resource that
+ // another resource depends on.
+ //
+ // Things can be put into this list that may not be managed by
+ // Terraform. If Terraform doesn't find a matching ID in the
+ // overall state, then it assumes it isn't managed and doesn't
+ // worry about it.
+ Dependencies []string `json:"depends_on"`
+
+ // Primary is the current active instance for this resource.
+ // It can be replaced but only after a successful creation.
+ // This is the instance on which providers will act.
+ Primary *InstanceState `json:"primary"`
+
+ // Deposed is used in the mechanics of CreateBeforeDestroy: the existing
+ // Primary is Deposed to get it out of the way for the replacement Primary to
+ // be created by Apply. If the replacement Primary creates successfully, the
+ // Deposed instance is cleaned up.
+ //
+ // If there were problems creating the replacement Primary, the Deposed
+ // instance and the (now tainted) replacement Primary will be swapped so the
+ // tainted replacement will be cleaned up instead.
+ //
+ // An instance will remain in the Deposed list until it is successfully
+ // destroyed and purged.
+ Deposed []*InstanceState `json:"deposed"`
+
+ // Provider is used when a resource is connected to a provider with an alias.
+ // If this string is empty, the resource is connected to the default provider,
+ // e.g. "aws_instance" goes with the "aws" provider.
+ // If the resource block contained a "provider" key, that value will be set here.
+ Provider string `json:"provider"`
+
+ mu sync.Mutex
+}
+
+func (s *ResourceState) Lock() { s.mu.Lock() }
+func (s *ResourceState) Unlock() { s.mu.Unlock() }
+
+// Equal tests whether two ResourceStates are equal.
+func (s *ResourceState) Equal(other *ResourceState) bool {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Type != other.Type {
+ return false
+ }
+
+ if s.Provider != other.Provider {
+ return false
+ }
+
+ // Dependencies must be equal
+ sort.Strings(s.Dependencies)
+ sort.Strings(other.Dependencies)
+ if len(s.Dependencies) != len(other.Dependencies) {
+ return false
+ }
+ for i, d := range s.Dependencies {
+ if other.Dependencies[i] != d {
+ return false
+ }
+ }
+
+ // States must be equal
+ if !s.Primary.Equal(other.Primary) {
+ return false
+ }
+
+ return true
+}
+
+// Taint marks a resource as tainted.
+func (s *ResourceState) Taint() {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Primary != nil {
+ s.Primary.Tainted = true
+ }
+}
+
+// Untaint unmarks a resource as tainted.
+func (s *ResourceState) Untaint() {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Primary != nil {
+ s.Primary.Tainted = false
+ }
+}
+
+func (s *ResourceState) init() {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Primary == nil {
+ s.Primary = &InstanceState{}
+ }
+ s.Primary.init()
+
+ if s.Dependencies == nil {
+ s.Dependencies = []string{}
+ }
+
+ if s.Deposed == nil {
+ s.Deposed = make([]*InstanceState, 0)
+ }
+}
+
+func (s *ResourceState) deepcopy() *ResourceState {
+ stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
+ if err != nil {
+ panic(err)
+ }
+
+ return stateCopy.(*ResourceState)
+}
+
+// prune is used to remove any instances that are no longer required
+func (s *ResourceState) prune() {
+ s.Lock()
+ defer s.Unlock()
+
+ n := len(s.Deposed)
+ for i := 0; i < n; i++ {
+ inst := s.Deposed[i]
+ if inst == nil || inst.ID == "" {
+ copy(s.Deposed[i:], s.Deposed[i+1:])
+ s.Deposed[n-1] = nil
+ n--
+ i--
+ }
+ }
+ s.Deposed = s.Deposed[:n]
+
+ s.Dependencies = uniqueStrings(s.Dependencies)
+}
+
+func (s *ResourceState) sort() {
+ s.Lock()
+ defer s.Unlock()
+
+ sort.Strings(s.Dependencies)
+}
+
+func (s *ResourceState) String() string {
+ s.Lock()
+ defer s.Unlock()
+
+ var buf bytes.Buffer
+ buf.WriteString(fmt.Sprintf("Type = %s", s.Type))
+ return buf.String()
+}
+
+// InstanceState is used to track the unique state information belonging
+// to a given instance.
+type InstanceState struct {
+ // A unique ID for this resource. This is opaque to Terraform
+ // and is only meant as a lookup mechanism for the providers.
+ ID string `json:"id"`
+
+ // Attributes are basic information about the resource. Any keys here
+ // are accessible in variable format within Terraform configurations:
+ // ${resourcetype.name.attribute}.
+ Attributes map[string]string `json:"attributes"`
+
+ // Ephemeral is used to store any state associated with this instance
+ // that is necessary for the Terraform run to complete, but is not
+ // persisted to a state file.
+ Ephemeral EphemeralState `json:"-"`
+
+ // Meta is a simple K/V map that is persisted to the State but otherwise
+ // ignored by Terraform core. It's meant to be used for accounting by
+ // external client code. The value here must only contain Go primitives
+ // and collections.
+ Meta map[string]interface{} `json:"meta"`
+
+ // Tainted is used to mark a resource for recreation.
+ Tainted bool `json:"tainted"`
+
+ mu sync.Mutex
+}
+
+func (s *InstanceState) Lock() { s.mu.Lock() }
+func (s *InstanceState) Unlock() { s.mu.Unlock() }
+
+func (s *InstanceState) init() {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Attributes == nil {
+ s.Attributes = make(map[string]string)
+ }
+ if s.Meta == nil {
+ s.Meta = make(map[string]interface{})
+ }
+ s.Ephemeral.init()
+}
+
+// Set copies all the fields from another InstanceState.
+func (s *InstanceState) Set(from *InstanceState) {
+ s.Lock()
+ defer s.Unlock()
+
+ from.Lock()
+ defer from.Unlock()
+
+ s.ID = from.ID
+ s.Attributes = from.Attributes
+ s.Ephemeral = from.Ephemeral
+ s.Meta = from.Meta
+ s.Tainted = from.Tainted
+}
+
+func (s *InstanceState) DeepCopy() *InstanceState {
+ stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
+ if err != nil {
+ panic(err)
+ }
+
+ return stateCopy.(*InstanceState)
+}
+
+func (s *InstanceState) Empty() bool {
+ if s == nil {
+ return true
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ return s.ID == ""
+}
+
+func (s *InstanceState) Equal(other *InstanceState) bool {
+ // Short circuit some nil checks
+ if s == nil || other == nil {
+ return s == other
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ // IDs must be equal
+ if s.ID != other.ID {
+ return false
+ }
+
+ // Attributes must be equal
+ if len(s.Attributes) != len(other.Attributes) {
+ return false
+ }
+ for k, v := range s.Attributes {
+ otherV, ok := other.Attributes[k]
+ if !ok {
+ return false
+ }
+
+ if v != otherV {
+ return false
+ }
+ }
+
+ // Meta must be equal
+ if len(s.Meta) != len(other.Meta) {
+ return false
+ }
+ if s.Meta != nil && other.Meta != nil {
+ // We only do the deep check if both are non-nil. If one is nil
+ // we treat it as equal since their lengths are both zero (check
+ // above).
+ if !reflect.DeepEqual(s.Meta, other.Meta) {
+ return false
+ }
+ }
+
+ if s.Tainted != other.Tainted {
+ return false
+ }
+
+ return true
+}
+
+// MergeDiff takes a ResourceDiff and merges the attributes into
+// this resource state in order to generate a new state. This new
+// state can be used to provide updated attribute lookups for
+// variable interpolation.
+//
+// If the diff attribute requires computing the value, and hence
+// won't be available until apply, the value is replaced with the
+// unknown value placeholder (config.UnknownVariableValue).
+func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState {
+ result := s.DeepCopy()
+ if result == nil {
+ result = new(InstanceState)
+ }
+ result.init()
+
+ if s != nil {
+ s.Lock()
+ defer s.Unlock()
+ for k, v := range s.Attributes {
+ result.Attributes[k] = v
+ }
+ }
+ if d != nil {
+ for k, diff := range d.CopyAttributes() {
+ if diff.NewRemoved {
+ delete(result.Attributes, k)
+ continue
+ }
+ if diff.NewComputed {
+ result.Attributes[k] = config.UnknownVariableValue
+ continue
+ }
+
+ result.Attributes[k] = diff.New
+ }
+ }
+
+ return result
+}
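The precedence rules in MergeDiff (removed keys are deleted, computed keys are replaced with the unknown placeholder, everything else is overwritten by the diff) can be sketched independently of the surrounding types. The placeholder constant and the attrDiff struct below are stand-ins for config.UnknownVariableValue and the real diff type:

    package main

    import "fmt"

    const unknown = "74D93920-ED26-11E3-AC10-0800200C9A66" // stand-in for config.UnknownVariableValue

    type attrDiff struct {
        New         string
        NewComputed bool
        NewRemoved  bool
    }

    func mergeAttrs(old map[string]string, diff map[string]attrDiff) map[string]string {
        out := make(map[string]string, len(old))
        for k, v := range old {
            out[k] = v
        }
        for k, d := range diff {
            switch {
            case d.NewRemoved:
                delete(out, k) // key removed by the diff
            case d.NewComputed:
                out[k] = unknown // not known until apply
            default:
                out[k] = d.New
            }
        }
        return out
    }

    func main() {
        old := map[string]string{"ami": "ami-123", "tags.env": "dev"}
        diff := map[string]attrDiff{
            "tags.env":   {NewRemoved: true},
            "private_ip": {NewComputed: true},
            "ami":        {New: "ami-456"},
        }
        fmt.Println(mergeAttrs(old, diff))
    }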
+
+func (s *InstanceState) String() string {
+ // Check for nil before taking the lock; calling Lock on a nil
+ // receiver would panic before the old nil check was ever reached.
+ if s == nil {
+ return "<not created>"
+ }
+
+ s.Lock()
+ defer s.Unlock()
+
+ if s.ID == "" {
+ return "<not created>"
+ }
+
+ var buf bytes.Buffer
+
+ buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID))
+
+ attributes := s.Attributes
+ attrKeys := make([]string, 0, len(attributes))
+ for ak := range attributes {
+ if ak == "id" {
+ continue
+ }
+
+ attrKeys = append(attrKeys, ak)
+ }
+ sort.Strings(attrKeys)
+
+ for _, ak := range attrKeys {
+ av := attributes[ak]
+ buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av))
+ }
+
+ buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted))
+
+ return buf.String()
+}
+
+// EphemeralState is used for transient state that is only kept in-memory
+type EphemeralState struct {
+ // ConnInfo is used for the providers to export information which is
+ // used to connect to the resource for provisioning. For example,
+ // this could contain SSH or WinRM credentials.
+ ConnInfo map[string]string `json:"-"`
+
+ // Type is used to specify the resource type for this instance. This is only
+ // required for import operations (as documented). If the documentation
+ // doesn't state that you need to set this, then don't worry about
+ // setting it.
+ Type string `json:"-"`
+}
+
+func (e *EphemeralState) init() {
+ if e.ConnInfo == nil {
+ e.ConnInfo = make(map[string]string)
+ }
+}
+
+func (e *EphemeralState) DeepCopy() *EphemeralState {
+ stateCopy, err := copystructure.Config{Lock: true}.Copy(e)
+ if err != nil {
+ panic(err)
+ }
+
+ return stateCopy.(*EphemeralState)
+}
+
+type jsonStateVersionIdentifier struct {
+ Version int `json:"version"`
+}
+
+// testForV0State checks whether this is a V0 format: the magic bytes at the
+// start of the file should be "tfstate" if so. We no longer support upgrading
+// this type of state but return an error message explaining to a user how
+// they can upgrade via the 0.6.x series.
+func testForV0State(buf *bufio.Reader) error {
+ start, err := buf.Peek(len("tfstate"))
+ if err != nil {
+ return fmt.Errorf("Failed to check for magic bytes: %v", err)
+ }
+ if string(start) == "tfstate" {
+ return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" +
+ "format which was used prior to Terraform 0.3. Please upgrade\n" +
+ "this state file using Terraform 0.6.16 prior to using it with\n" +
+ "Terraform 0.7.")
+ }
+
+ return nil
+}
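The check relies on bufio.Reader.Peek returning the leading bytes without consuming them, so the JSON decode that follows still sees the full stream. A standalone sketch of the technique:

    package main

    import (
        "bufio"
        "fmt"
        "strings"
    )

    // isV0 reports whether the reader begins with the legacy magic bytes,
    // without advancing the read position.
    func isV0(r *bufio.Reader) (bool, error) {
        magic := "tfstate"
        start, err := r.Peek(len(magic))
        if err != nil {
            return false, err
        }
        return string(start) == magic, nil
    }

    func main() {
        r := bufio.NewReader(strings.NewReader("tfstate..."))
        ok, _ := isV0(r)
        fmt.Println(ok) // true; the reader is still positioned at the start
    }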
+
+// ErrNoState is returned by ReadState when the io.Reader contains no data
+var ErrNoState = errors.New("no state")
+
+// ReadState reads a state structure out of a reader in the format that
+// was written by WriteState.
+func ReadState(src io.Reader) (*State, error) {
+ buf := bufio.NewReader(src)
+ if _, err := buf.Peek(1); err != nil {
+ // the error is either io.EOF or "invalid argument", and both are from
+ // an empty state.
+ return nil, ErrNoState
+ }
+
+ if err := testForV0State(buf); err != nil {
+ return nil, err
+ }
+
+ // Buffer the whole document in memory so we can decode it twice: once
+ // for the version and once for the full state. This is suboptimal, but
+ // will work for now.
+ jsonBytes, err := ioutil.ReadAll(buf)
+ if err != nil {
+ return nil, fmt.Errorf("Reading state file failed: %v", err)
+ }
+
+ versionIdentifier := &jsonStateVersionIdentifier{}
+ if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil {
+ return nil, fmt.Errorf("Decoding state file version failed: %v", err)
+ }
+
+ var result *State
+ switch versionIdentifier.Version {
+ case 0:
+ return nil, fmt.Errorf("State version 0 is not supported as JSON.")
+ case 1:
+ v1State, err := ReadStateV1(jsonBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ v2State, err := upgradeStateV1ToV2(v1State)
+ if err != nil {
+ return nil, err
+ }
+
+ v3State, err := upgradeStateV2ToV3(v2State)
+ if err != nil {
+ return nil, err
+ }
+
+ // increment the Serial whenever we upgrade state
+ v3State.Serial++
+ result = v3State
+ case 2:
+ v2State, err := ReadStateV2(jsonBytes)
+ if err != nil {
+ return nil, err
+ }
+ v3State, err := upgradeStateV2ToV3(v2State)
+ if err != nil {
+ return nil, err
+ }
+
+ v3State.Serial++
+ result = v3State
+ case 3:
+ v3State, err := ReadStateV3(jsonBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ result = v3State
+ default:
+ return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+ SemVersion.String(), versionIdentifier.Version)
+ }
+
+ // If we reached this place we must have a result set
+ if result == nil {
+ panic("resulting state in load not set, assertion failed")
+ }
+
+ // Prune the state when we read it. It's possible to write unpruned states or
+ // for a user to make a state unpruned (nil-ing a module state, for example).
+ result.prune()
+
+ // Validate the state file is valid
+ if err := result.Validate(); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
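The version dispatch above hinges on a cheap first pass that decodes only the version field before committing to a version-specific decoder. The two-pass decode in isolation:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type versionProbe struct {
        Version int `json:"version"`
    }

    // stateVersion decodes just the version field; the full document can
    // then be unmarshaled a second time into the matching struct.
    func stateVersion(jsonBytes []byte) (int, error) {
        var p versionProbe
        if err := json.Unmarshal(jsonBytes, &p); err != nil {
            return 0, fmt.Errorf("decoding state file version failed: %v", err)
        }
        return p.Version, nil
    }

    func main() {
        v, err := stateVersion([]byte(`{"version": 3, "serial": 7}`))
        if err != nil {
            panic(err)
        }
        fmt.Println(v) // 3
    }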
+
+func ReadStateV1(jsonBytes []byte) (*stateV1, error) {
+ v1State := &stateV1{}
+ if err := json.Unmarshal(jsonBytes, v1State); err != nil {
+ return nil, fmt.Errorf("Decoding state file failed: %v", err)
+ }
+
+ if v1State.Version != 1 {
+ return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+
+ "read %d, expected 1", v1State.Version)
+ }
+
+ return v1State, nil
+}
+
+func ReadStateV2(jsonBytes []byte) (*State, error) {
+ state := &State{}
+ if err := json.Unmarshal(jsonBytes, state); err != nil {
+ return nil, fmt.Errorf("Decoding state file failed: %v", err)
+ }
+
+ // Check the version, this to ensure we don't read a future
+ // version that we don't understand
+ if state.Version > StateVersion {
+ return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+ SemVersion.String(), state.Version)
+ }
+
+ // Make sure the version is semantic
+ if state.TFVersion != "" {
+ if _, err := version.NewVersion(state.TFVersion); err != nil {
+ return nil, fmt.Errorf(
+ "State contains invalid version: %s\n\n"+
+ "Terraform validates the version format prior to writing it. This\n"+
+ "means that this is invalid of the state becoming corrupted through\n"+
+ "some external means. Please manually modify the Terraform version\n"+
+ "field to be a proper semantic version.",
+ state.TFVersion)
+ }
+ }
+
+ // catch any uninitialized fields in the state
+ state.init()
+
+ // Sort it
+ state.sort()
+
+ return state, nil
+}
+
+func ReadStateV3(jsonBytes []byte) (*State, error) {
+ state := &State{}
+ if err := json.Unmarshal(jsonBytes, state); err != nil {
+ return nil, fmt.Errorf("Decoding state file failed: %v", err)
+ }
+
+ // Check the version, this to ensure we don't read a future
+ // version that we don't understand
+ if state.Version > StateVersion {
+ return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+ SemVersion.String(), state.Version)
+ }
+
+ // Make sure the version is semantic
+ if state.TFVersion != "" {
+ if _, err := version.NewVersion(state.TFVersion); err != nil {
+ return nil, fmt.Errorf(
+ "State contains invalid version: %s\n\n"+
+ "Terraform validates the version format prior to writing it. This\n"+
+ "means that this is invalid of the state becoming corrupted through\n"+
+ "some external means. Please manually modify the Terraform version\n"+
+ "field to be a proper semantic version.",
+ state.TFVersion)
+ }
+ }
+
+ // catch any uninitialized fields in the state
+ state.init()
+
+ // Sort it
+ state.sort()
+
+ // Now we write the state back out to detect any changes in normalization.
+ // If our state is now written out differently, bump the serial number to
+ // prevent conflicts.
+ var buf bytes.Buffer
+ err := WriteState(state, &buf)
+ if err != nil {
+ return nil, err
+ }
+
+ if !bytes.Equal(jsonBytes, buf.Bytes()) {
+ log.Println("[INFO] state modified during read or write. incrementing serial number")
+ state.Serial++
+ }
+
+ return state, nil
+}
+
+// WriteState writes a state to dst in the JSON format understood by ReadState.
+func WriteState(d *State, dst io.Writer) error {
+ // writing a nil state is a noop.
+ if d == nil {
+ return nil
+ }
+
+ // make sure we have no uninitialized fields
+ d.init()
+
+ // Make sure it is sorted
+ d.sort()
+
+ // Ensure the version is set
+ d.Version = StateVersion
+
+ // If the TFVersion is set, verify it. We used to just set the version
+ // here, but this isn't safe since it changes the MD5 sum on some remote
+ // state storage backends such as Atlas. We now leave it be if needed.
+ if d.TFVersion != "" {
+ if _, err := version.NewVersion(d.TFVersion); err != nil {
+ return fmt.Errorf(
+ "Error writing state, invalid version: %s\n\n"+
+ "The Terraform version when writing the state must be a semantic\n"+
+ "version.",
+ d.TFVersion)
+ }
+ }
+
+ // Encode the data in a human-friendly way
+ data, err := json.MarshalIndent(d, "", " ")
+ if err != nil {
+ return fmt.Errorf("Failed to encode state: %s", err)
+ }
+
+ // We append a newline to the data because MarshalIndent doesn't
+ data = append(data, '\n')
+
+ // Write the data out to the dst
+ if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil {
+ return fmt.Errorf("Failed to write state: %v", err)
+ }
+
+ return nil
+}
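A round-trip usage sketch for the pair of functions, assuming the surrounding package; note that ReadState prunes, validates, and may bump Serial if normalization changes the bytes:

    var buf bytes.Buffer
    if err := WriteState(state, &buf); err != nil {
        log.Fatal(err)
    }
    restored, err := ReadState(&buf)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(restored.Version) // always StateVersion after a WriteState round trip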
+
+// resourceNameSort implements sort.Interface to sort name parts lexically for
+// strings and numerically for integer indexes.
+type resourceNameSort []string
+
+func (r resourceNameSort) Len() int { return len(r) }
+func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
+
+func (r resourceNameSort) Less(i, j int) bool {
+ iParts := strings.Split(r[i], ".")
+ jParts := strings.Split(r[j], ".")
+
+ end := len(iParts)
+ if len(jParts) < end {
+ end = len(jParts)
+ }
+
+ for idx := 0; idx < end; idx++ {
+ if iParts[idx] == jParts[idx] {
+ continue
+ }
+
+ // sort on the first non-matching part
+ iInt, iIntErr := strconv.Atoi(iParts[idx])
+ jInt, jIntErr := strconv.Atoi(jParts[idx])
+
+ switch {
+ case iIntErr == nil && jIntErr == nil:
+ // sort numerically if both parts are integers
+ return iInt < jInt
+ case iIntErr == nil:
+ // numbers sort before strings
+ return true
+ case jIntErr == nil:
+ return false
+ default:
+ return iParts[idx] < jParts[idx]
+ }
+ }
+
+ return r[i] < r[j]
+}
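The mixed ordering matters for counted resources, where a purely lexical sort would place "foo.10" before "foo.2". A quick demonstration, assuming the package context:

    names := []string{
        "aws_instance.foo.10",
        "aws_instance.foo.2",
        "aws_instance.bar",
    }
    sort.Sort(resourceNameSort(names))
    fmt.Println(names)
    // [aws_instance.bar aws_instance.foo.2 aws_instance.foo.10]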
+
+// moduleStateSort implements sort.Interface to sort module states
+type moduleStateSort []*ModuleState
+
+func (s moduleStateSort) Len() int {
+ return len(s)
+}
+
+func (s moduleStateSort) Less(i, j int) bool {
+ a := s[i]
+ b := s[j]
+
+ // If either is nil, the nil one sorts first
+ if a == nil || b == nil {
+ return a == nil
+ }
+
+ // If the lengths are different, then the shorter one always wins
+ if len(a.Path) != len(b.Path) {
+ return len(a.Path) < len(b.Path)
+ }
+
+ // Otherwise, compare lexically
+ return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
+}
+
+func (s moduleStateSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+const stateValidateErrMultiModule = `
+Multiple modules with the same path: %s
+
+This means that there are multiple entries in the "modules" field
+in your state file that point to the same module. This will cause Terraform
+to behave in unexpected and error prone ways and is invalid. Please back up
+and modify your state file manually to resolve this.
+`
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_add.go b/vendor/github.com/hashicorp/terraform/terraform/state_add.go
new file mode 100644
index 00000000..11637303
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_add.go
@@ -0,0 +1,374 @@
+package terraform
+
+import "fmt"
+
+// Add adds the item in the state at the given address.
+//
+// The item can be a ModuleState, ResourceState, or InstanceState. Depending
+// on the item type, the address may or may not be valid. For example, a
+// module cannot be moved to a resource address, however a resource can be
+// moved to a module address (it retains the same name, under that module).
+//
+// The item can also be a []*ModuleState, which is the case for nested
+// modules. In this case, Add will expect the zero-index to be the top-most
+// module to add and will only nest children from there. For semantics, this
+// is equivalent to module => module.
+//
+// The full semantics of Add:
+//
+// ┌───────────────────┬───────────────────┬───────────────────┐
+// │ Module Address │ Resource Address │ Instance Address │
+// ┌─────────────────┼───────────────────┼───────────────────┼───────────────────┤
+// │ ModuleState │ ✓ │ x │ x │
+// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
+// │ ResourceState │ ✓ │ ✓ │ maybe* │
+// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
+// │ Instance State │ ✓ │ ✓ │ ✓ │
+// └─────────────────┴───────────────────┴───────────────────┴───────────────────┘
+//
+// *maybe - Resources can be added at an instance address only if the resource
+// represents a single instance (primary). Example:
+// "aws_instance.foo" can be moved to "aws_instance.bar.tainted"
+//
+func (s *State) Add(fromAddrRaw string, toAddrRaw string, raw interface{}) error {
+ // Parse the to address
+ toAddr, err := ParseResourceAddress(toAddrRaw)
+ if err != nil {
+ return err
+ }
+
+ // Parse the from address
+ fromAddr, err := ParseResourceAddress(fromAddrRaw)
+ if err != nil {
+ return err
+ }
+
+ // Determine the types
+ from := detectValueAddLoc(raw)
+ to := detectAddrAddLoc(toAddr)
+
+ // Find the function to do this
+ fromMap, ok := stateAddFuncs[from]
+ if !ok {
+ return fmt.Errorf("invalid source to add to state: %T", raw)
+ }
+ f, ok := fromMap[to]
+ if !ok {
+ return fmt.Errorf("invalid destination: %s (%d)", toAddr, to)
+ }
+
+ // Call the migrator
+ if err := f(s, fromAddr, toAddr, raw); err != nil {
+ return err
+ }
+
+ // Prune the state
+ s.prune()
+ return nil
+}
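A usage sketch for Add, assuming a populated *State s and a *ResourceState rs obtained elsewhere; the addresses are hypothetical:

    // Resource => resource: a plain rename.
    if err := s.Add("aws_instance.web", "aws_instance.web_v2", rs); err != nil {
        log.Fatal(err)
    }

    // Resource => module: per the matrix above, the type and name are
    // retained under the target module.
    if err := s.Add("aws_instance.web_v2", "module.frontend", rs); err != nil {
        log.Fatal(err)
    }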
+
+func stateAddFunc_Module_Module(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
+ // raw can be either *ModuleState or []*ModuleState. The former means
+ // we're moving just one module. The latter means we're moving a module
+ // and children.
+ root := raw
+ var rest []*ModuleState
+ if list, ok := raw.([]*ModuleState); ok {
+ // We need at least one item
+ if len(list) == 0 {
+ return fmt.Errorf("module move with no value to: %s", addr)
+ }
+
+ // The first item is always the root
+ root = list[0]
+ if len(list) > 1 {
+ rest = list[1:]
+ }
+ }
+
+ // Get the actual module state
+ src := root.(*ModuleState).deepcopy()
+
+ // If the target module exists, it is an error
+ path := append([]string{"root"}, addr.Path...)
+ if s.ModuleByPath(path) != nil {
+ return fmt.Errorf("module target is not empty: %s", addr)
+ }
+
+ // Create it and copy our outputs and dependencies
+ mod := s.AddModule(path)
+ mod.Outputs = src.Outputs
+ mod.Dependencies = src.Dependencies
+
+ // Go through the resources perform an add for each of those
+ for k, v := range src.Resources {
+ resourceKey, err := ParseResourceStateKey(k)
+ if err != nil {
+ return err
+ }
+
+ // Update the resource address for this
+ addrCopy := *addr
+ addrCopy.Type = resourceKey.Type
+ addrCopy.Name = resourceKey.Name
+ addrCopy.Index = resourceKey.Index
+ addrCopy.Mode = resourceKey.Mode
+
+ // Perform an add
+ if err := s.Add(fromAddr.String(), addrCopy.String(), v); err != nil {
+ return err
+ }
+ }
+
+ // Add all the children if we have them
+ for _, item := range rest {
+ // If item isn't a descendent of our root, then ignore it
+ if !src.IsDescendent(item) {
+ continue
+ }
+
+ // It is! Strip the leading prefix and attach that to our address
+ extra := item.Path[len(src.Path):]
+ addrCopy := addr.Copy()
+ addrCopy.Path = append(addrCopy.Path, extra...)
+
+ // Add it, propagating any error
+ if err := s.Add(fromAddr.String(), addrCopy.String(), item); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func stateAddFunc_Resource_Module(
+ s *State, from, to *ResourceAddress, raw interface{}) error {
+ // Build the more specific to addr
+ addr := *to
+ addr.Type = from.Type
+ addr.Name = from.Name
+
+ return s.Add(from.String(), addr.String(), raw)
+}
+
+func stateAddFunc_Resource_Resource(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
+ // raw can be either *ResourceState or []*ResourceState. The former means
+ // we're moving just one resource. The latter means we're moving a count
+ // of resources.
+ if list, ok := raw.([]*ResourceState); ok {
+ // We need at least one item
+ if len(list) == 0 {
+ return fmt.Errorf("resource move with no value to: %s", addr)
+ }
+
+ // If there is an index, this is an error since we can't assign
+ // a set of resources to a single index
+ if addr.Index >= 0 && len(list) > 1 {
+ return fmt.Errorf(
+ "multiple resources can't be moved to a single index: "+
+ "%s => %s", fromAddr, addr)
+ }
+
+ // Add each with a specific index
+ for i, rs := range list {
+ addrCopy := addr.Copy()
+ addrCopy.Index = i
+
+ if err := s.Add(fromAddr.String(), addrCopy.String(), rs); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+
+ src := raw.(*ResourceState).deepcopy()
+
+ // Initialize the resource
+ resourceRaw, exists := stateAddInitAddr(s, addr)
+ if exists {
+ return fmt.Errorf("resource exists and not empty: %s", addr)
+ }
+ resource := resourceRaw.(*ResourceState)
+ resource.Type = src.Type
+ resource.Dependencies = src.Dependencies
+ resource.Provider = src.Provider
+
+ // Move the primary
+ if src.Primary != nil {
+ addrCopy := *addr
+ addrCopy.InstanceType = TypePrimary
+ addrCopy.InstanceTypeSet = true
+ if err := s.Add(fromAddr.String(), addrCopy.String(), src.Primary); err != nil {
+ return err
+ }
+ }
+
+ // Move all deposed
+ if len(src.Deposed) > 0 {
+ resource.Deposed = src.Deposed
+ }
+
+ return nil
+}
+
+func stateAddFunc_Instance_Instance(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
+ src := raw.(*InstanceState).DeepCopy()
+
+ // Create the instance
+ instanceRaw, _ := stateAddInitAddr(s, addr)
+ instance := instanceRaw.(*InstanceState)
+
+ // Set it
+ instance.Set(src)
+
+ return nil
+}
+
+func stateAddFunc_Instance_Module(
+ s *State, from, to *ResourceAddress, raw interface{}) error {
+ addr := *to
+ addr.Type = from.Type
+ addr.Name = from.Name
+
+ return s.Add(from.String(), addr.String(), raw)
+}
+
+func stateAddFunc_Instance_Resource(
+ s *State, from, to *ResourceAddress, raw interface{}) error {
+ addr := *to
+ addr.InstanceType = TypePrimary
+ addr.InstanceTypeSet = true
+
+ return s.Add(from.String(), addr.String(), raw)
+}
+
+// stateAddFunc is the type of function for adding an item to a state
+type stateAddFunc func(s *State, from, to *ResourceAddress, item interface{}) error
+
+// stateAddFuncs has the full matrix mapping of the state adders.
+var stateAddFuncs map[stateAddLoc]map[stateAddLoc]stateAddFunc
+
+func init() {
+ stateAddFuncs = map[stateAddLoc]map[stateAddLoc]stateAddFunc{
+ stateAddModule: {
+ stateAddModule: stateAddFunc_Module_Module,
+ },
+ stateAddResource: {
+ stateAddModule: stateAddFunc_Resource_Module,
+ stateAddResource: stateAddFunc_Resource_Resource,
+ },
+ stateAddInstance: {
+ stateAddInstance: stateAddFunc_Instance_Instance,
+ stateAddModule: stateAddFunc_Instance_Module,
+ stateAddResource: stateAddFunc_Instance_Resource,
+ },
+ }
+}
+
+// stateAddLoc is an enum to represent the location where state is being
+// moved from/to. We use this for quick lookups in a function map.
+type stateAddLoc uint
+
+const (
+ stateAddInvalid stateAddLoc = iota
+ stateAddModule
+ stateAddResource
+ stateAddInstance
+)
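The enum plus map-of-maps arrangement gives a from/to dispatch matrix where unsupported combinations simply fail the lookup. A self-contained miniature of the pattern:

    package main

    import "fmt"

    type loc uint

    const (
        locModule loc = iota
        locResource
    )

    type mover func(from, to string) error

    // matrix maps (source kind, destination kind) to a handler; missing
    // entries mean the move is invalid.
    var matrix = map[loc]map[loc]mover{
        locResource: {
            locModule: func(from, to string) error {
                fmt.Printf("resource %s => module %s\n", from, to)
                return nil
            },
        },
    }

    func main() {
        f, ok := matrix[locResource][locModule]
        if !ok {
            panic("invalid move") // e.g. module => resource has no entry
        }
        _ = f("aws_instance.web", "module.frontend")
    }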
+
+// detectAddrAddLoc detects the state type for the given address. This
+// function is specifically not unit tested since we consider the State.Add
+// functionality to be comprehensive enough to cover this.
+func detectAddrAddLoc(addr *ResourceAddress) stateAddLoc {
+ if addr.Name == "" {
+ return stateAddModule
+ }
+
+ if !addr.InstanceTypeSet {
+ return stateAddResource
+ }
+
+ return stateAddInstance
+}
+
+// detectValueAddLoc determines the stateAddLoc value from the raw value
+// that is some State structure.
+func detectValueAddLoc(raw interface{}) stateAddLoc {
+ switch raw.(type) {
+ case *ModuleState:
+ return stateAddModule
+ case []*ModuleState:
+ return stateAddModule
+ case *ResourceState:
+ return stateAddResource
+ case []*ResourceState:
+ return stateAddResource
+ case *InstanceState:
+ return stateAddInstance
+ default:
+ return stateAddInvalid
+ }
+}
+
+// stateAddInitAddr takes a ResourceAddress and creates the non-existing
+// resources up to that point, returning the empty (or existing) interface
+// at that address.
+func stateAddInitAddr(s *State, addr *ResourceAddress) (interface{}, bool) {
+ addType := detectAddrAddLoc(addr)
+
+ // Get the module
+ path := append([]string{"root"}, addr.Path...)
+ exists := true
+ mod := s.ModuleByPath(path)
+ if mod == nil {
+ mod = s.AddModule(path)
+ exists = false
+ }
+ if addType == stateAddModule {
+ return mod, exists
+ }
+
+ // Add the resource
+ resourceKey := (&ResourceStateKey{
+ Name: addr.Name,
+ Type: addr.Type,
+ Index: addr.Index,
+ Mode: addr.Mode,
+ }).String()
+ exists = true
+ resource, ok := mod.Resources[resourceKey]
+ if !ok {
+ resource = &ResourceState{Type: addr.Type}
+ resource.init()
+ mod.Resources[resourceKey] = resource
+ exists = false
+ }
+ if addType == stateAddResource {
+ return resource, exists
+ }
+
+ // Get the instance
+ exists = true
+ instance := &InstanceState{}
+ switch addr.InstanceType {
+ case TypePrimary, TypeTainted:
+ if v := resource.Primary; v != nil {
+ instance = v
+ } else {
+ exists = false
+ }
+ case TypeDeposed:
+ idx := addr.Index
+ if addr.Index < 0 {
+ idx = 0
+ }
+ if len(resource.Deposed) > idx {
+ instance = resource.Deposed[idx]
+ } else {
+ resource.Deposed = append(resource.Deposed, instance)
+ exists = false
+ }
+ }
+
+ return instance, exists
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go
new file mode 100644
index 00000000..2dcb11b7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go
@@ -0,0 +1,267 @@
+package terraform
+
+import (
+ "fmt"
+ "sort"
+)
+
+// StateFilter is responsible for filtering and searching a state.
+//
+// This is a separate struct from State rather than a method on State
+// because StateFilter might create sidecar data structures to optimize
+// filtering on the state.
+//
+// If you change the State, the filter created is invalid and either
+// Reset should be called or a new one should be allocated. StateFilter
+// will not watch State for changes and do this for you. If you filter after
+// changing the State without calling Reset, the behavior is not defined.
+type StateFilter struct {
+ State *State
+}
+
+// Filter takes the addresses specified by fs and finds all the matches.
+// The values of fs are resource addressing syntax that can be parsed by
+// ParseResourceAddress.
+func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) {
+ // Parse all the addresses
+ as := make([]*ResourceAddress, len(fs))
+ for i, v := range fs {
+ a, err := ParseResourceAddress(v)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing address '%s': %s", v, err)
+ }
+
+ as[i] = a
+ }
+
+ // If we weren't given any filters, then we list all
+ if len(fs) == 0 {
+ as = append(as, &ResourceAddress{Index: -1})
+ }
+
+ // Filter each of the addresses. We keep track of the results in a map to
+ // strip duplicates.
+ resultSet := make(map[string]*StateFilterResult)
+ for _, a := range as {
+ for _, r := range f.filterSingle(a) {
+ resultSet[r.String()] = r
+ }
+ }
+
+ // Make the result list
+ results := make([]*StateFilterResult, 0, len(resultSet))
+ for _, v := range resultSet {
+ results = append(results, v)
+ }
+
+ // Sort them and return
+ sort.Sort(StateFilterResultSlice(results))
+ return results, nil
+}
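A usage sketch, assuming a populated *State; the addresses are hypothetical:

    filter := &StateFilter{State: s}
    results, err := filter.Filter("module.frontend", "aws_instance.web")
    if err != nil {
        log.Fatal(err)
    }
    for _, r := range results {
        // Value must be type-switched: *ModuleState, *ResourceState,
        // or *InstanceState.
        fmt.Println(r.String())
    }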
+
+func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult {
+ // The slice to keep track of results
+ var results []*StateFilterResult
+
+ // Go through modules first.
+ modules := make([]*ModuleState, 0, len(f.State.Modules))
+ for _, m := range f.State.Modules {
+ if f.relevant(a, m) {
+ modules = append(modules, m)
+
+ // Only add the module to the results if we haven't specified a type.
+ // We also ignore the root module.
+ if a.Type == "" && len(m.Path) > 1 {
+ results = append(results, &StateFilterResult{
+ Path: m.Path[1:],
+ Address: (&ResourceAddress{Path: m.Path[1:]}).String(),
+ Value: m,
+ })
+ }
+ }
+ }
+
+ // With the modules set, go through all the resources within
+ // the modules to find relevant resources.
+ for _, m := range modules {
+ for n, r := range m.Resources {
+ // The name in the state contains valuable information. Parse.
+ key, err := ParseResourceStateKey(n)
+ if err != nil {
+ // If we get an error parsing, just skip this entry.
+ continue
+ }
+
+ // Older states and test fixtures often don't contain the
+ // type directly on the ResourceState. We add this so StateFilter
+ // is a bit more robust.
+ if r.Type == "" {
+ r.Type = key.Type
+ }
+
+ if f.relevant(a, r) {
+ if a.Name != "" && a.Name != key.Name {
+ // Name doesn't match
+ continue
+ }
+
+ if a.Index >= 0 && key.Index != a.Index {
+ // Index doesn't match
+ continue
+ }
+
+ if a.Name != "" && a.Name != key.Name {
+ continue
+ }
+
+ // Build the address for this resource
+ addr := &ResourceAddress{
+ Path: m.Path[1:],
+ Name: key.Name,
+ Type: key.Type,
+ Index: key.Index,
+ }
+
+ // Add the resource level result
+ resourceResult := &StateFilterResult{
+ Path: addr.Path,
+ Address: addr.String(),
+ Value: r,
+ }
+ if !a.InstanceTypeSet {
+ results = append(results, resourceResult)
+ }
+
+ // Add the instances
+ if r.Primary != nil {
+ addr.InstanceType = TypePrimary
+ addr.InstanceTypeSet = false
+ results = append(results, &StateFilterResult{
+ Path: addr.Path,
+ Address: addr.String(),
+ Parent: resourceResult,
+ Value: r.Primary,
+ })
+ }
+
+ for _, instance := range r.Deposed {
+ if f.relevant(a, instance) {
+ addr.InstanceType = TypeDeposed
+ addr.InstanceTypeSet = true
+ results = append(results, &StateFilterResult{
+ Path: addr.Path,
+ Address: addr.String(),
+ Parent: resourceResult,
+ Value: instance,
+ })
+ }
+ }
+ }
+ }
+ }
+
+ return results
+}
+
+// relevant checks for relevance of this address against the given value.
+func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool {
+ switch v := raw.(type) {
+ case *ModuleState:
+ path := v.Path[1:]
+
+ if len(addr.Path) > len(path) {
+ // Longer path in address means there is no way we match.
+ return false
+ }
+
+ // Check for a prefix match
+ for i, p := range addr.Path {
+ if path[i] != p {
+ // Any mismatches don't match.
+ return false
+ }
+ }
+
+ return true
+ case *ResourceState:
+ if addr.Type == "" {
+ // If we have no resource type, then we're interested in all!
+ return true
+ }
+
+ // If the type doesn't match we fail immediately
+ if v.Type != addr.Type {
+ return false
+ }
+
+ return true
+ default:
+ // If we don't know about it, let's just say no
+ return false
+ }
+}
+
+// StateFilterResult is a single result from a filter operation. Filter
+// can match multiple things within a state (module, resource, instance, etc.)
+// and this unifies that.
+type StateFilterResult struct {
+ // Module path of the result
+ Path []string
+
+ // Address is the address that can be used to reference this exact result.
+ Address string
+
+ // Parent, if non-nil, is a parent of this result. For instances, the
+ // parent would be a resource. For resources, the parent would be
+ // a module. For modules, this is currently nil.
+ Parent *StateFilterResult
+
+ // Value is the actual value. This must be type switched on. It can be
+ // any data structures that `State` can hold: `ModuleState`,
+ // `ResourceState`, `InstanceState`.
+ Value interface{}
+}
+
+func (r *StateFilterResult) String() string {
+ return fmt.Sprintf("%T: %s", r.Value, r.Address)
+}
+
+func (r *StateFilterResult) sortedType() int {
+ switch r.Value.(type) {
+ case *ModuleState:
+ return 0
+ case *ResourceState:
+ return 1
+ case *InstanceState:
+ return 2
+ default:
+ return 50
+ }
+}
+
+// StateFilterResultSlice is a slice of results that implements
+// sort.Interface. The sort order is chosen to be the most readable
+// in human-facing output.
+type StateFilterResultSlice []*StateFilterResult
+
+func (s StateFilterResultSlice) Len() int { return len(s) }
+func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s StateFilterResultSlice) Less(i, j int) bool {
+ a, b := s[i], s[j]
+
+ // if these addresses contain an index, we want to sort by index rather than name
+ addrA, errA := ParseResourceAddress(a.Address)
+ addrB, errB := ParseResourceAddress(b.Address)
+ if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index {
+ return addrA.Index < addrB.Index
+ }
+
+ // If the addresses are different it is just lexicographic sorting
+ if a.Address != b.Address {
+ return a.Address < b.Address
+ }
+
+ // Addresses are the same, which means it matters on the type
+ return a.sortedType() < b.sortedType()
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
new file mode 100644
index 00000000..aa13cce8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
@@ -0,0 +1,189 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/mitchellh/copystructure"
+)
+
+// upgradeStateV1ToV2 is used to upgrade a V1 state representation
+// into a V2 state representation
+func upgradeStateV1ToV2(old *stateV1) (*State, error) {
+ if old == nil {
+ return nil, nil
+ }
+
+ remote, err := old.Remote.upgradeToV2()
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading State V1: %v", err)
+ }
+
+ modules := make([]*ModuleState, len(old.Modules))
+ for i, module := range old.Modules {
+ upgraded, err := module.upgradeToV2()
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading State V1: %v", err)
+ }
+ modules[i] = upgraded
+ }
+ if len(modules) == 0 {
+ modules = nil
+ }
+
+ newState := &State{
+ Version: 2,
+ Serial: old.Serial,
+ Remote: remote,
+ Modules: modules,
+ }
+
+ newState.sort()
+ newState.init()
+
+ return newState, nil
+}
+
+func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) {
+ if old == nil {
+ return nil, nil
+ }
+
+ config, err := copystructure.Copy(old.Config)
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err)
+ }
+
+ return &RemoteState{
+ Type: old.Type,
+ Config: config.(map[string]string),
+ }, nil
+}
+
+func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) {
+ if old == nil {
+ return nil, nil
+ }
+
+ pathRaw, err := copystructure.Copy(old.Path)
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
+ }
+ path, ok := pathRaw.([]string)
+ if !ok {
+ return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings")
+ }
+ if len(path) == 0 {
+ // We found some V1 states with a nil path. Assume root and catch
+ // duplicate path errors later (as part of Validate).
+ path = rootModulePath
+ }
+
+ // Outputs needs upgrading to use the new structure
+ outputs := make(map[string]*OutputState)
+ for key, output := range old.Outputs {
+ outputs[key] = &OutputState{
+ Type: "string",
+ Value: output,
+ Sensitive: false,
+ }
+ }
+
+ resources := make(map[string]*ResourceState)
+ for key, oldResource := range old.Resources {
+ upgraded, err := oldResource.upgradeToV2()
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
+ }
+ resources[key] = upgraded
+ }
+
+ dependencies, err := copystructure.Copy(old.Dependencies)
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
+ }
+
+ return &ModuleState{
+ Path: path,
+ Outputs: outputs,
+ Resources: resources,
+ Dependencies: dependencies.([]string),
+ }, nil
+}
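The only structural change for outputs in the V1 to V2 upgrade is wrapping each flat string in an OutputState; a self-contained sketch of just that transform (the struct here mirrors the fields defined earlier):

    package main

    import "fmt"

    type outputState struct {
        Sensitive bool
        Type      string
        Value     interface{}
    }

    func upgradeOutputs(old map[string]string) map[string]*outputState {
        out := make(map[string]*outputState, len(old))
        for k, v := range old {
            // V1 outputs were always flat strings, so Type is fixed.
            out[k] = &outputState{Type: "string", Value: v, Sensitive: false}
        }
        return out
    }

    func main() {
        fmt.Println(upgradeOutputs(map[string]string{"ip": "10.0.0.1"})["ip"])
    }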
+
+func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) {
+ if old == nil {
+ return nil, nil
+ }
+
+ dependencies, err := copystructure.Copy(old.Dependencies)
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
+ }
+
+ primary, err := old.Primary.upgradeToV2()
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
+ }
+
+ deposed := make([]*InstanceState, len(old.Deposed))
+ for i, v := range old.Deposed {
+ upgraded, err := v.upgradeToV2()
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
+ }
+ deposed[i] = upgraded
+ }
+ if len(deposed) == 0 {
+ deposed = nil
+ }
+
+ return &ResourceState{
+ Type: old.Type,
+ Dependencies: dependencies.([]string),
+ Primary: primary,
+ Deposed: deposed,
+ Provider: old.Provider,
+ }, nil
+}
+
+func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) {
+ if old == nil {
+ return nil, nil
+ }
+
+ attributes, err := copystructure.Copy(old.Attributes)
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
+ }
+ ephemeral, err := old.Ephemeral.upgradeToV2()
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
+ }
+
+ meta, err := copystructure.Copy(old.Meta)
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
+ }
+
+ newMeta := make(map[string]interface{})
+ for k, v := range meta.(map[string]string) {
+ newMeta[k] = v
+ }
+
+ return &InstanceState{
+ ID: old.ID,
+ Attributes: attributes.(map[string]string),
+ Ephemeral: *ephemeral,
+ Meta: newMeta,
+ }, nil
+}
+
+func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) {
+ connInfo, err := copystructure.Copy(old.ConnInfo)
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err)
+ }
+ return &EphemeralState{
+ ConnInfo: connInfo.(map[string]string),
+ }, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go
new file mode 100644
index 00000000..e52d35fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go
@@ -0,0 +1,142 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// The upgrade process from V2 to V3 state does not affect the structure,
+// so we do not need to redeclare all of the structs involved - we just
+// take a deep copy of the old structure and assert the version number is
+// as we expect.
+func upgradeStateV2ToV3(old *State) (*State, error) {
+ newState := old.DeepCopy()
+
+ // Ensure the copied version is v2 before attempting to upgrade
+ if newState.Version != 2 {
+ return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " +
+ "a state which is not version 2.")
+ }
+
+ // Set the new version number
+ newState.Version = 3
+
+ // Change the counts for things which look like maps to use the %
+ // syntax. Remove counts for empty collections - they will be added
+ // back in later.
+ for _, module := range newState.Modules {
+ for _, resource := range module.Resources {
+ // Upgrade Primary
+ if resource.Primary != nil {
+ upgradeAttributesV2ToV3(resource.Primary)
+ }
+
+ // Upgrade Deposed
+ if resource.Deposed != nil {
+ for _, deposed := range resource.Deposed {
+ upgradeAttributesV2ToV3(deposed)
+ }
+ }
+ }
+ }
+
+ return newState, nil
+}
+
+func upgradeAttributesV2ToV3(instanceState *InstanceState) error {
+ collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`)
+ collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`)
+
+ // Identify the key prefix of anything which is a collection
+ var collectionKeyPrefixes []string
+ for key := range instanceState.Attributes {
+ if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
+ collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1])
+ }
+ }
+ sort.Strings(collectionKeyPrefixes)
+
+ log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes)
+
+ // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not
+ // run very often.
+ for _, prefix := range collectionKeyPrefixes {
+ // First get the actual keys that belong to this prefix
+ var potentialKeysMatching []string
+ for key := range instanceState.Attributes {
+ if strings.HasPrefix(key, prefix) {
+ potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix))
+ }
+ }
+ sort.Strings(potentialKeysMatching)
+
+ var actualKeysMatching []string
+ for _, key := range potentialKeysMatching {
+ if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
+ actualKeysMatching = append(actualKeysMatching, submatches[0][1])
+ } else {
+ if key != "#" {
+ actualKeysMatching = append(actualKeysMatching, key)
+ }
+ }
+ }
+ actualKeysMatching = uniqueSortedStrings(actualKeysMatching)
+
+ // Now inspect the keys in order to determine whether this is most likely to be
+ // a map, list or set. There is room for error here, so we log in each case. If
+ // there is no method of telling, we remove the key from the InstanceState in
+ // order that it will be recreated. Again, this could be rolled into fewer loops
+ // but we prefer clarity.
+
+ oldCountKey := fmt.Sprintf("%s#", prefix)
+
+ // First, detect "obvious" maps - which have non-numeric keys (mostly).
+ hasNonNumericKeys := false
+ for _, key := range actualKeysMatching {
+ if _, err := strconv.Atoi(key); err != nil {
+ hasNonNumericKeys = true
+ }
+ }
+ if hasNonNumericKeys {
+ newCountKey := fmt.Sprintf("%s%%", prefix)
+
+ instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey]
+ delete(instanceState.Attributes, oldCountKey)
+ log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s",
+ strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey])
+ }
+
+ // Now detect empty collections and remove them from state.
+ if len(actualKeysMatching) == 0 {
+ delete(instanceState.Attributes, oldCountKey)
+ log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.",
+ strings.TrimSuffix(prefix, "."))
+ }
+ }
+
+ return nil
+}
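Reduced to its core, the map-detection rule renames the flatmap count key from ".#" to ".%" whenever the collection has any non-numeric subkey. A self-contained sketch under that reading:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // renameMapCounts rewrites "prefix.#" to "prefix.%" whenever the
    // collection has a non-numeric subkey (i.e. it looks like a map).
    func renameMapCounts(attrs map[string]string) {
        for key := range attrs {
            if !strings.HasSuffix(key, ".#") {
                continue
            }
            prefix := strings.TrimSuffix(key, "#") // e.g. "tags."
            isMap := false
            for k := range attrs {
                if k == key || !strings.HasPrefix(k, prefix) {
                    continue
                }
                sub := strings.TrimPrefix(k, prefix)
                if _, err := strconv.Atoi(strings.SplitN(sub, ".", 2)[0]); err != nil {
                    isMap = true
                }
            }
            if isMap {
                attrs[prefix+"%"] = attrs[key]
                delete(attrs, key)
            }
        }
    }

    func main() {
        attrs := map[string]string{"tags.#": "2", "tags.env": "prod", "tags.team": "ops"}
        renameMapCounts(attrs)
        fmt.Println(attrs) // map[tags.%:2 tags.env:prod tags.team:ops]
    }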
+
+// uniqueSortedStrings removes duplicates from a slice of strings and returns
+// a sorted slice of the unique strings.
+func uniqueSortedStrings(input []string) []string {
+ uniquemap := make(map[string]struct{})
+ for _, str := range input {
+ uniquemap[str] = struct{}{}
+ }
+
+ output := make([]string, len(uniquemap))
+
+ i := 0
+ for key := range uniquemap {
+ output[i] = key
+ i++
+ }
+
+ sort.Strings(output)
+ return output
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_v1.go b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
new file mode 100644
index 00000000..68cffb41
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
@@ -0,0 +1,145 @@
+package terraform
+
+// stateV1 keeps track of a snapshot state-of-the-world that Terraform
+// can use to keep track of what real world resources it is actually
+// managing.
+//
+// stateV1 is kept _only_ for the purposes of backwards compatibility
+// and is no longer used as the current state format in Terraform.
+//
+// For the upgrade process, see state_upgrade_v1_to_v2.go
+type stateV1 struct {
+ // Version is the protocol version. "1" for a StateV1.
+ Version int `json:"version"`
+
+ // Serial is incremented on any operation that modifies
+ // the State file. It is used to detect potentially conflicting
+ // updates.
+ Serial int64 `json:"serial"`
+
+ // Remote is used to track the metadata required to
+ // pull and push state files from a remote storage endpoint.
+ Remote *remoteStateV1 `json:"remote,omitempty"`
+
+ // Modules contains all the modules in a breadth-first order
+ Modules []*moduleStateV1 `json:"modules"`
+}
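+
+// For illustration, a minimal V1 state document matching the JSON tags
+// above might look like this (values are hypothetical):
+//
+//   {
+//     "version": 1,
+//     "serial": 3,
+//     "modules": [
+//       {"path": ["root"], "outputs": {"addr": "10.0.0.1"}, "resources": {}}
+//     ]
+//   }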
+
+type remoteStateV1 struct {
+ // Type controls the client we use for the remote state
+ Type string `json:"type"`
+
+ // Config is used to store arbitrary configuration that
+ // is type specific
+ Config map[string]string `json:"config"`
+}
+
+type moduleStateV1 struct {
+ // Path is the import path from the root module. Module imports are
+ // always disjoint, so the path represents a module tree.
+ Path []string `json:"path"`
+
+ // Outputs declared by the module and maintained for each module
+ // even though only the root module technically needs to be kept.
+ // This allows operators to inspect values at the boundaries.
+ Outputs map[string]string `json:"outputs"`
+
+ // Resources is a mapping of the logically named resource to
+ // the state of the resource. Each resource may actually have
+ // N instances underneath, although a user only needs to think
+ // about the 1:1 case.
+ Resources map[string]*resourceStateV1 `json:"resources"`
+
+ // Dependencies are a list of things that this module relies on
+ // existing to remain intact. For example: a module may depend
+ // on a VPC ID given by an aws_vpc resource.
+ //
+ // Terraform uses this information to build valid destruction
+ // orders and to warn the user if they're destroying a module that
+ // another resource depends on.
+ //
+ // Things can be put into this list that may not be managed by
+ // Terraform. If Terraform doesn't find a matching ID in the
+ // overall state, then it assumes it isn't managed and doesn't
+ // worry about it.
+ Dependencies []string `json:"depends_on,omitempty"`
+}
+
+type resourceStateV1 struct {
+ // This is filled in and managed by Terraform, and is the resource
+ // type itself such as "mycloud_instance". If a resource provider sets
+ // this value, it won't be persisted.
+ Type string `json:"type"`
+
+ // Dependencies are a list of things that this resource relies on
+ // existing to remain intact. For example: an AWS instance might
+ // depend on a subnet (which itself might depend on a VPC, and so
+ // on).
+ //
+ // Terraform uses this information to build valid destruction
+ // orders and to warn the user if they're destroying a resource that
+ // another resource depends on.
+ //
+ // Things can be put into this list that may not be managed by
+ // Terraform. If Terraform doesn't find a matching ID in the
+ // overall state, then it assumes it isn't managed and doesn't
+ // worry about it.
+ Dependencies []string `json:"depends_on,omitempty"`
+
+ // Primary is the current active instance for this resource.
+ // It can be replaced but only after a successful creation.
+ // This is the instance on which providers will act.
+ Primary *instanceStateV1 `json:"primary"`
+
+ // Tainted is used to track any underlying instances that
+ // have been created but are in a bad or unknown state and
+ // need to be cleaned up subsequently. In the
+ // standard case, there is at most a single instance.
+ // However, in pathological cases, it is possible for the number
+ // of instances to accumulate.
+ Tainted []*instanceStateV1 `json:"tainted,omitempty"`
+
+ // Deposed is used in the mechanics of CreateBeforeDestroy: the existing
+ // Primary is Deposed to get it out of the way for the replacement Primary to
+ // be created by Apply. If the replacement Primary creates successfully, the
+ // Deposed instance is cleaned up. If there were problems creating the
+ // replacement, the instance remains in the Deposed list so it can be
+ // destroyed in a future run. Functionally, Deposed instances are very
+ // similar to Tainted instances in that Terraform is only tracking them in
+ // order to remember to destroy them.
+ Deposed []*instanceStateV1 `json:"deposed,omitempty"`
+
+ // Provider is used when a resource is connected to a provider with an alias.
+ // If this string is empty, the resource is connected to the default provider,
+ // e.g. "aws_instance" goes with the "aws" provider.
+ // If the resource block contained a "provider" key, that value will be set here.
+ Provider string `json:"provider,omitempty"`
+}
+
+type instanceStateV1 struct {
+ // A unique ID for this resource. This is opaque to Terraform
+ // and is only meant as a lookup mechanism for the providers.
+ ID string `json:"id"`
+
+ // Attributes are basic information about the resource. Any keys here
+ // are accessible in variable format within Terraform configurations:
+ // ${resourcetype.name.attribute}.
+ Attributes map[string]string `json:"attributes,omitempty"`
+
+ // Ephemeral is used to store any state associated with this instance
+ // that is necessary for the Terraform run to complete, but is not
+ // persisted to a state file.
+ Ephemeral ephemeralStateV1 `json:"-"`
+
+ // Meta is a simple K/V map that is persisted to the State but otherwise
+ // ignored by Terraform core. It's meant to be used for accounting by
+ // external client code.
+ Meta map[string]string `json:"meta,omitempty"`
+}
+
+type ephemeralStateV1 struct {
+ // ConnInfo is used for the providers to export information which is
+ // used to connect to the resource for provisioning. For example,
+ // this could contain SSH or WinRM credentials.
+ ConnInfo map[string]string `json:"-"`
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/testing.go b/vendor/github.com/hashicorp/terraform/terraform/testing.go
new file mode 100644
index 00000000..3f0418d9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/testing.go
@@ -0,0 +1,19 @@
+package terraform
+
+import (
+ "os"
+ "testing"
+)
+
+// TestStateFile writes the given state to the path.
+func TestStateFile(t *testing.T, path string, state *State) {
+ f, err := os.Create(path)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ defer f.Close()
+
+ if err := WriteState(state, f); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
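+
+// For illustration (hypothetical caller), a test might write a fixture
+// state like so:
+//
+//	state := &State{Version: 3, Serial: 1}
+//	TestStateFile(t, filepath.Join(dir, "terraform.tfstate"), state)
+//
+// where dir is a temporary directory created by the test.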
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go
new file mode 100644
index 00000000..f4a431a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform.go
@@ -0,0 +1,52 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphTransformer is the interface that transformers implement. This
+// interface is only for transforms that need entire graph visibility.
+type GraphTransformer interface {
+ Transform(*Graph) error
+}
+
+// GraphVertexTransformer is an interface that transforms a single
+// Vertex within the graph. This is a specialization of GraphTransformer
+// that makes it easy to do vertex replacement.
+//
+// The GraphTransformer that runs through the GraphVertexTransformers is
+// VertexTransformer.
+type GraphVertexTransformer interface {
+ Transform(dag.Vertex) (dag.Vertex, error)
+}
+
+// GraphTransformIf is a helper function that returns the given
+// GraphTransformer when the condition function returns true, and nil
+// otherwise. This is useful for building a sequence of transforms inline
+// without having to split it up into multiple append() calls.
+func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer {
+ if f() {
+ return then
+ }
+
+ return nil
+}
+
+type graphTransformerMulti struct {
+ Transforms []GraphTransformer
+}
+
+func (t *graphTransformerMulti) Transform(g *Graph) error {
+ for _, t := range t.Transforms {
+ if err := t.Transform(g); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// GraphTransformMulti combines multiple graph transformers into a single
+// GraphTransformer that runs all the individual graph transformers.
+func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer {
+ return &graphTransformerMulti{Transforms: ts}
+}
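+
+// For illustration (hypothetical composition):
+//
+//	t := GraphTransformMulti(
+//		&AttachResourceConfigTransformer{Module: mod},
+//		&AttachStateTransformer{State: state},
+//	)
+//	err := t.Transform(g) // runs each transformer in order, stopping on error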
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
new file mode 100644
index 00000000..10506ea0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
@@ -0,0 +1,80 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// GraphNodeAttachProvider is an interface that must be implemented by nodes
+// that want provider configurations attached.
+type GraphNodeAttachProvider interface {
+ // Must be implemented to determine the path for the configuration
+ GraphNodeSubPath
+
+ // ProviderName with no module prefix. Example: "aws".
+ ProviderName() string
+
+ // Sets the configuration
+ AttachProvider(*config.ProviderConfig)
+}
+
+// AttachProviderConfigTransformer goes through the graph and attaches
+// provider configuration structures to nodes that implement the interfaces
+// above.
+//
+// The attached configuration structures are directly from the configuration.
+// If they're going to be modified, a copy should be made.
+type AttachProviderConfigTransformer struct {
+ Module *module.Tree // Module is the root module for the config
+}
+
+func (t *AttachProviderConfigTransformer) Transform(g *Graph) error {
+ if err := t.attachProviders(g); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (t *AttachProviderConfigTransformer) attachProviders(g *Graph) error {
+ // Go through and find GraphNodeAttachProvider
+ for _, v := range g.Vertices() {
+ // Only care about GraphNodeAttachProvider implementations
+ apn, ok := v.(GraphNodeAttachProvider)
+ if !ok {
+ continue
+ }
+
+ // Determine what we're looking for
+ path := normalizeModulePath(apn.Path())
+ path = path[1:]
+ name := apn.ProviderName()
+ log.Printf("[TRACE] Attach provider request: %#v %s", path, name)
+
+ // Get the configuration.
+ tree := t.Module.Child(path)
+ if tree == nil {
+ continue
+ }
+
+ // Go through the provider configs to find the matching config
+ for _, p := range tree.Config().ProviderConfigs {
+ // Build the name, which is "name.alias" if an alias exists
+ current := p.Name
+ if p.Alias != "" {
+ current += "." + p.Alias
+ }
+
+ // If the configs match then attach!
+ if current == name {
+ log.Printf("[TRACE] Attaching provider config: %#v", p)
+ apn.AttachProvider(p)
+ break
+ }
+ }
+ }
+
+ return nil
+}
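+
+// For illustration: a node whose ProviderName() returns "aws.west"
+// (hypothetical) matches a provider config with Name "aws" and Alias
+// "west", while a plain "aws" matches the un-aliased "aws" provider block.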
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
new file mode 100644
index 00000000..f2ee37e5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
@@ -0,0 +1,78 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes
+// that want resource configurations attached.
+type GraphNodeAttachResourceConfig interface {
+ // ResourceAddr is the address to the resource
+ ResourceAddr() *ResourceAddress
+
+ // Sets the configuration
+ AttachResourceConfig(*config.Resource)
+}
+
+// AttachResourceConfigTransformer goes through the graph and attaches
+// resource configuration structures to nodes that implement the interfaces
+// above.
+//
+// The attached configuration structures are directly from the configuration.
+// If they're going to be modified, a copy should be made.
+type AttachResourceConfigTransformer struct {
+ Module *module.Tree // Module is the root module for the config
+}
+
+func (t *AttachResourceConfigTransformer) Transform(g *Graph) error {
+ log.Printf("[TRACE] AttachResourceConfigTransformer: Beginning...")
+
+ // Go through and find GraphNodeAttachResource
+ for _, v := range g.Vertices() {
+ // Only care about GraphNodeAttachResource implementations
+ arn, ok := v.(GraphNodeAttachResourceConfig)
+ if !ok {
+ continue
+ }
+
+ // Determine what we're looking for
+ addr := arn.ResourceAddr()
+ log.Printf(
+ "[TRACE] AttachResourceConfigTransformer: Attach resource "+
+ "config request: %s", addr)
+
+ // Get the configuration.
+ path := normalizeModulePath(addr.Path)
+ path = path[1:]
+ tree := t.Module.Child(path)
+ if tree == nil {
+ continue
+ }
+
+ // Go through the resource configs to find the matching config
+ for _, r := range tree.Config().Resources {
+ // Get a resource address so we can compare
+ a, err := parseResourceAddressConfig(r)
+ if err != nil {
+ panic(fmt.Sprintf(
+ "Error parsing config address, this is a bug: %#v", r))
+ }
+ a.Path = addr.Path
+
+ // If this is not the same resource, then continue
+ if !a.Equals(addr) {
+ continue
+ }
+
+ log.Printf("[TRACE] Attaching resource config: %#v", r)
+ arn.AttachResourceConfig(r)
+ break
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
new file mode 100644
index 00000000..564ff08f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
@@ -0,0 +1,68 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeAttachResourceState is an interface that can be implemented
+// to request that a ResourceState is attached to the node.
+type GraphNodeAttachResourceState interface {
+ // The address to the resource for the state
+ ResourceAddr() *ResourceAddress
+
+ // Sets the state
+ AttachResourceState(*ResourceState)
+}
+
+// AttachStateTransformer goes through the graph and attaches
+// state to nodes that implement the interfaces above.
+type AttachStateTransformer struct {
+ State *State // State is the root state
+}
+
+func (t *AttachStateTransformer) Transform(g *Graph) error {
+ // If no state, then nothing to do
+ if t.State == nil {
+ log.Printf("[DEBUG] Not attaching any state: state is nil")
+ return nil
+ }
+
+ filter := &StateFilter{State: t.State}
+ for _, v := range g.Vertices() {
+ // Only care about nodes requesting that state be attached
+ an, ok := v.(GraphNodeAttachResourceState)
+ if !ok {
+ continue
+ }
+ addr := an.ResourceAddr()
+
+ // Get the module state
+ results, err := filter.Filter(addr.String())
+ if err != nil {
+ return err
+ }
+
+ // Attach the first resource state we get
+ found := false
+ for _, result := range results {
+ if rs, ok := result.Value.(*ResourceState); ok {
+ log.Printf(
+ "[DEBUG] Attaching resource state to %q: %#v",
+ dag.VertexName(v), rs)
+ an.AttachResourceState(rs)
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ log.Printf(
+ "[DEBUG] Resource state not found for %q: %s",
+ dag.VertexName(v), addr)
+ }
+ }
+
+ return nil
+}
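+
+// For illustration: for a node whose ResourceAddr() renders as
+// "aws_instance.web" (hypothetical), the state filter is queried with that
+// address and the first *ResourceState result found is attached.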
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
new file mode 100644
index 00000000..61bce853
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
@@ -0,0 +1,135 @@
+package terraform
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "sync"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ConfigTransformer is a GraphTransformer that adds all the resources
+// from the configuration to the graph.
+//
+// The module used to configure this transformer must be the root module.
+//
+// Only resources are added to the graph. Variables, outputs, and
+// providers must be added via other transforms.
+//
+// Unlike ConfigTransformerOld, this transformer creates a graph with
+// all resources including module resources, rather than creating module
+// nodes that are then "flattened".
+type ConfigTransformer struct {
+ Concrete ConcreteResourceNodeFunc
+
+ // Module is the module to add resources from.
+ Module *module.Tree
+
+ // Unique will only add resources that aren't already present in the graph.
+ Unique bool
+
+ // ModeFilter, if true, will only add resources that match the given Mode
+ ModeFilter bool
+ Mode config.ResourceMode
+
+ l sync.Mutex
+ uniqueMap map[string]struct{}
+}
+
+func (t *ConfigTransformer) Transform(g *Graph) error {
+ // Lock since we use some internal state
+ t.l.Lock()
+ defer t.l.Unlock()
+
+ // If no module is given, we don't do anything
+ if t.Module == nil {
+ return nil
+ }
+
+ // If the module isn't loaded, that is simply an error
+ if !t.Module.Loaded() {
+ return errors.New("module must be loaded for ConfigTransformer")
+ }
+
+ // Reset the uniqueness map. If we're tracking uniques, then populate
+ // it with addresses.
+ t.uniqueMap = make(map[string]struct{})
+ defer func() { t.uniqueMap = nil }()
+ if t.Unique {
+ for _, v := range g.Vertices() {
+ if rn, ok := v.(GraphNodeResource); ok {
+ t.uniqueMap[rn.ResourceAddr().String()] = struct{}{}
+ }
+ }
+ }
+
+ // Start the transformation process
+ return t.transform(g, t.Module)
+}
+
+func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error {
+ // If no config, do nothing
+ if m == nil {
+ return nil
+ }
+
+ // Add our resources
+ if err := t.transformSingle(g, m); err != nil {
+ return err
+ }
+
+ // Transform all the children.
+ for _, c := range m.Children() {
+ if err := t.transform(g, c); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (t *ConfigTransformer) transformSingle(g *Graph, m *module.Tree) error {
+ log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", m.Path())
+
+ // Get the configuration for this module
+ conf := m.Config()
+
+ // Build the path we're at
+ path := m.Path()
+
+ // Write all the resources out
+ for _, r := range conf.Resources {
+ // Build the resource address
+ addr, err := parseResourceAddressConfig(r)
+ if err != nil {
+ panic(fmt.Sprintf(
+ "Error parsing config address, this is a bug: %#v", r))
+ }
+ addr.Path = path
+
+ // If this is already in our uniqueness map, don't add it again
+ if _, ok := t.uniqueMap[addr.String()]; ok {
+ continue
+ }
+
+ // Remove non-matching modes
+ if t.ModeFilter && addr.Mode != t.Mode {
+ continue
+ }
+
+ // Build the abstract node and the concrete one
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ // Add it to the graph
+ g.Add(node)
+ }
+
+ return nil
+}
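+
+// For illustration, the Concrete function decides what node type each
+// resource becomes (the concrete node type used here is hypothetical):
+//
+//	t := &ConfigTransformer{
+//		Module: mod,
+//		Concrete: func(a *NodeAbstractResource) dag.Vertex {
+//			return &NodeApplyableResource{NodeAbstractResource: a}
+//		},
+//	}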
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
new file mode 100644
index 00000000..92f9888d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
@@ -0,0 +1,80 @@
+package terraform
+
+import (
+ "errors"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// FlatConfigTransformer is a GraphTransformer that adds the configuration
+// to the graph. The module used to configure this transformer must be
+// the root module.
+//
+// This transform adds the nodes but doesn't connect any of the references.
+// The ReferenceTransformer should be used for that.
+//
+// NOTE: In relation to ConfigTransformer: this is a newer generation config
+// transformer. It puts the _entire_ config into the graph (there is no
+// "flattening" step as before).
+type FlatConfigTransformer struct {
+ Concrete ConcreteResourceNodeFunc // What to turn resources into
+
+ Module *module.Tree
+}
+
+func (t *FlatConfigTransformer) Transform(g *Graph) error {
+ // If no module, we do nothing
+ if t.Module == nil {
+ return nil
+ }
+
+ // If the module is not loaded, that is an error
+ if !t.Module.Loaded() {
+ return errors.New("module must be loaded")
+ }
+
+ return t.transform(g, t.Module)
+}
+
+func (t *FlatConfigTransformer) transform(g *Graph, m *module.Tree) error {
+ // If no module, no problem
+ if m == nil {
+ return nil
+ }
+
+ // Transform all the children.
+ for _, c := range m.Children() {
+ if err := t.transform(g, c); err != nil {
+ return err
+ }
+ }
+
+ // Get the configuration for this module
+ config := m.Config()
+
+ // Write all the resources out
+ for _, r := range config.Resources {
+ // Grab the address for this resource
+ addr, err := parseResourceAddressConfig(r)
+ if err != nil {
+ return err
+ }
+ addr.Path = m.Path()
+
+ // Build the abstract resource. We have the config already so
+ // we'll just pre-populate that.
+ abstract := &NodeAbstractResource{
+ Addr: addr,
+ Config: r,
+ }
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go
new file mode 100644
index 00000000..ec412582
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go
@@ -0,0 +1,23 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// varNameForVar returns the VarName value for an interpolated variable.
+// This value is compared to the VarName() value for the nodes within the
+// graph to build the graph edges.
+func varNameForVar(raw config.InterpolatedVariable) string {
+ switch v := raw.(type) {
+ case *config.ModuleVariable:
+ return fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)
+ case *config.ResourceVariable:
+ return v.ResourceId()
+ case *config.UserVariable:
+ return fmt.Sprintf("var.%s", v.Name)
+ default:
+ return ""
+ }
+}
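+
+// For illustration: a UserVariable named "region" yields "var.region", and
+// a ModuleVariable with Name "net" and Field "vpc_id" yields
+// "module.net.output.vpc_id".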
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
new file mode 100644
index 00000000..83415f35
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
@@ -0,0 +1,28 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// CountBoundaryTransformer adds a node that depends on everything else
+// so that it runs last in order to clean up the state for nodes that
+// are on the "count boundary": "foo.0" when only one exists becomes "foo".
+type CountBoundaryTransformer struct{}
+
+func (t *CountBoundaryTransformer) Transform(g *Graph) error {
+ node := &NodeCountBoundary{}
+ g.Add(node)
+
+ // Depends on everything
+ for _, v := range g.Vertices() {
+ // Don't connect to ourselves
+ if v == node {
+ continue
+ }
+
+ // Connect!
+ g.Connect(dag.BasicEdge(node, v))
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
new file mode 100644
index 00000000..2148cef4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
@@ -0,0 +1,168 @@
+package terraform
+
+import "fmt"
+
+// DeposedTransformer is a GraphTransformer that adds deposed resources
+// to the graph.
+type DeposedTransformer struct {
+ // State is the global state. We'll automatically find the correct
+ // ModuleState based on the Graph.Path that is being transformed.
+ State *State
+
+ // View, if non-empty, is the ModuleState.View used around the state
+ // to find deposed resources.
+ View string
+}
+
+func (t *DeposedTransformer) Transform(g *Graph) error {
+ state := t.State.ModuleByPath(g.Path)
+ if state == nil {
+ // If there is no state for our module there can't be any deposed
+ // resources, since they live in the state.
+ return nil
+ }
+
+ // If we have a view, apply it now
+ if t.View != "" {
+ state = state.View(t.View)
+ }
+
+ // Go through all the resources in our state to look for deposed resources
+ for k, rs := range state.Resources {
+ // If we have no deposed resources, then move on
+ if len(rs.Deposed) == 0 {
+ continue
+ }
+ deposed := rs.Deposed
+
+ for i := range deposed {
+ g.Add(&graphNodeDeposedResource{
+ Index: i,
+ ResourceName: k,
+ ResourceType: rs.Type,
+ Provider: rs.Provider,
+ })
+ }
+ }
+
+ return nil
+}
+
+// graphNodeDeposedResource is the graph vertex representing a deposed resource.
+type graphNodeDeposedResource struct {
+ Index int
+ ResourceName string
+ ResourceType string
+ Provider string
+}
+
+func (n *graphNodeDeposedResource) Name() string {
+ return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index)
+}
+
+func (n *graphNodeDeposedResource) ProvidedBy() []string {
+ return []string{resourceProvider(n.ResourceName, n.Provider)}
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeDeposedResource) EvalTree() EvalNode {
+ var provider ResourceProvider
+ var state *InstanceState
+
+ seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)}
+
+ // Build instance info
+ info := &InstanceInfo{Id: n.Name(), Type: n.ResourceType}
+ seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info})
+
+ // Refresh the resource
+ seq.Nodes = append(seq.Nodes, &EvalOpFilter{
+ Ops: []walkOperation{walkRefresh},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadStateDeposed{
+ Name: n.ResourceName,
+ Output: &state,
+ Index: n.Index,
+ },
+ &EvalRefresh{
+ Info: info,
+ Provider: &provider,
+ State: &state,
+ Output: &state,
+ },
+ &EvalWriteStateDeposed{
+ Name: n.ResourceName,
+ ResourceType: n.ResourceType,
+ Provider: n.Provider,
+ State: &state,
+ Index: n.Index,
+ },
+ },
+ },
+ })
+
+ // Apply
+ var diff *InstanceDiff
+ var err error
+ seq.Nodes = append(seq.Nodes, &EvalOpFilter{
+ Ops: []walkOperation{walkApply, walkDestroy},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadStateDeposed{
+ Name: n.ResourceName,
+ Output: &state,
+ Index: n.Index,
+ },
+ &EvalDiffDestroy{
+ Info: info,
+ State: &state,
+ Output: &diff,
+ },
+ // Call pre-apply hook
+ &EvalApplyPre{
+ Info: info,
+ State: &state,
+ Diff: &diff,
+ },
+ &EvalApply{
+ Info: info,
+ State: &state,
+ Diff: &diff,
+ Provider: &provider,
+ Output: &state,
+ Error: &err,
+ },
+ // Always write the resource back to the state deposed... if it
+ // was successfully destroyed it will be pruned. If it was not, it will
+ // be caught on the next run.
+ &EvalWriteStateDeposed{
+ Name: n.ResourceName,
+ ResourceType: n.ResourceType,
+ Provider: n.Provider,
+ State: &state,
+ Index: n.Index,
+ },
+ &EvalApplyPost{
+ Info: info,
+ State: &state,
+ Error: &err,
+ },
+ &EvalReturnError{
+ Error: &err,
+ },
+ &EvalUpdateStateHook{},
+ },
+ },
+ })
+
+ return seq
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
new file mode 100644
index 00000000..edfb460b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
@@ -0,0 +1,257 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeDestroyerCBD must be implemented by nodes that might be
+// create-before-destroy destroyers.
+type GraphNodeDestroyerCBD interface {
+ GraphNodeDestroyer
+
+ // CreateBeforeDestroy returns true if this node represents a node
+ // that is doing a CBD.
+ CreateBeforeDestroy() bool
+
+ // ModifyCreateBeforeDestroy is called when the CBD state of a node
+ // is changed dynamically. This can return an error if this isn't
+ // allowed.
+ ModifyCreateBeforeDestroy(bool) error
+}
+
+// CBDEdgeTransformer modifies the edges of CBD nodes that went through
+// the DestroyEdgeTransformer to have the right dependencies. There are
+// two real tasks here:
+//
+// 1. With CBD, the destroy edge is inverted: the destroy depends on
+// the creation.
+//
+// 2. A_d must depend on resources that depend on A. This is to enable
+// the destroy to only happen once nodes that depend on A successfully
+// update to A. Example: adding a web server updates the load balancer
+// before deleting the old web server.
+//
+type CBDEdgeTransformer struct {
+ // Module and State are only needed to look up dependencies.
+ // Either can be nil if not available.
+ Module *module.Tree
+ State *State
+}
+
+func (t *CBDEdgeTransformer) Transform(g *Graph) error {
+ log.Printf("[TRACE] CBDEdgeTransformer: Beginning CBD transformation...")
+
+ // Go through and reverse any destroy edges
+ destroyMap := make(map[string][]dag.Vertex)
+ for _, v := range g.Vertices() {
+ dn, ok := v.(GraphNodeDestroyerCBD)
+ if !ok {
+ continue
+ }
+
+ if !dn.CreateBeforeDestroy() {
+ // If there are no CBD ancestors (dependent nodes), then we
+ // do nothing here.
+ if !t.hasCBDAncestor(g, v) {
+ continue
+ }
+
+ // If this isn't naturally a CBD node, this means that an ancestor is,
+ // and we need to auto-upgrade this node to CBD. We do this because
+ // a CBD node depending on a non-CBD node will result in cycles. To
+ // avoid this, we always attempt to upgrade it.
+ if err := dn.ModifyCreateBeforeDestroy(true); err != nil {
+ return fmt.Errorf(
+ "%s: must have create before destroy enabled because "+
+ "a dependent resource has CBD enabled. However, when "+
+ "attempting to automatically do this, an error occurred: %s",
+ dag.VertexName(v), err)
+ }
+ }
+
+ // Find the destroy edge. There should only be one.
+ for _, e := range g.EdgesTo(v) {
+ // Not a destroy edge, ignore it
+ de, ok := e.(*DestroyEdge)
+ if !ok {
+ continue
+ }
+
+ log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s",
+ dag.VertexName(de.Source()), dag.VertexName(de.Target()))
+
+ // Found it! Invert.
+ g.RemoveEdge(de)
+ g.Connect(&DestroyEdge{S: de.Target(), T: de.Source()})
+ }
+
+ // If the address has an index, we strip that. Our depMap creation
+ // graph doesn't expand counts so we don't currently get _exact_
+ // dependencies. One day when we limit dependencies more exactly
+ // this will have to change. We have a test case covering this
+ // (depNonCBDCountBoth) so it'll be caught.
+ addr := dn.DestroyAddr()
+ if addr.Index >= 0 {
+ addr = addr.Copy() // Copy so that we don't modify any pointers
+ addr.Index = -1
+ }
+
+ // Add this to the list of nodes that we need to fix up
+ // the edges for (step 2 above in the docs).
+ key := addr.String()
+ destroyMap[key] = append(destroyMap[key], v)
+ }
+
+ // If we have no CBD nodes, then our work here is done
+ if len(destroyMap) == 0 {
+ return nil
+ }
+
+ // We have CBD nodes. We now have to move on to the much more difficult
+ // task of connecting dependencies of the creation side of the destroy
+ // to the destruction node. The easiest way to explain this is an example:
+ //
+ // Given a pre-destroy dependence of: A => B
+ // And A has CBD set.
+ //
+ // The resulting graph should be: A => B => A_d
+ //
+ // The key here is that B happens before A is destroyed. This is to
+ // facilitate the primary purpose for CBD: making sure that downstreams
+ // are properly updated to avoid downtime before the resource is destroyed.
+ //
+ // We can't trust that the resource being destroyed or anything that
+ // depends on it is actually in our current graph so we make a new
+ // graph in order to determine those dependencies and add them in.
+ log.Printf("[TRACE] CBDEdgeTransformer: building graph to find dependencies...")
+ depMap, err := t.depMap(destroyMap)
+ if err != nil {
+ return err
+ }
+
+ // We now have the mapping of resource addresses to the destroy
+ // nodes they need to depend on. We now go through our own vertices to
+ // find any matching these addresses and make the connection.
+ for _, v := range g.Vertices() {
+ // We're looking for creators
+ rn, ok := v.(GraphNodeCreator)
+ if !ok {
+ continue
+ }
+
+ // Get the address
+ addr := rn.CreateAddr()
+
+ // If the address has an index, we strip that. Our depMap creation
+ // graph doesn't expand counts so we don't currently get _exact_
+ // dependencies. One day when we limit dependencies more exactly
+ // this will have to change. We have a test case covering this
+ // (depNonCBDCount) so it'll be caught.
+ if addr.Index >= 0 {
+ addr = addr.Copy() // Copy so that we don't modify any pointers
+ addr.Index = -1
+ }
+
+ // If there is nothing this resource should depend on, ignore it
+ key := addr.String()
+ dns, ok := depMap[key]
+ if !ok {
+ continue
+ }
+
+ // We have nodes! Make the connection
+ for _, dn := range dns {
+ log.Printf("[TRACE] CBDEdgeTransformer: destroy depends on dependence: %s => %s",
+ dag.VertexName(dn), dag.VertexName(v))
+ g.Connect(dag.BasicEdge(dn, v))
+ }
+ }
+
+ return nil
+}
+
+func (t *CBDEdgeTransformer) depMap(
+ destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) {
+ // Build the graph of our config, this ensures that all resources
+ // are present in the graph.
+ g, err := (&BasicGraphBuilder{
+ Steps: []GraphTransformer{
+ &FlatConfigTransformer{Module: t.Module},
+ &AttachResourceConfigTransformer{Module: t.Module},
+ &AttachStateTransformer{State: t.State},
+ &ReferenceTransformer{},
+ },
+ Name: "CBDEdgeTransformer",
+ }).Build(nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Using this graph, build the list of destroy nodes that each resource
+ // address should depend on. For example, when we find B, we map the
+ // address of B to A_d in the "depMap" variable below.
+ depMap := make(map[string][]dag.Vertex)
+ for _, v := range g.Vertices() {
+ // We're looking for resources.
+ rn, ok := v.(GraphNodeResource)
+ if !ok {
+ continue
+ }
+
+ // Get the address
+ addr := rn.ResourceAddr()
+ key := addr.String()
+
+ // Get the destroy nodes that are destroying this resource.
+ // If there aren't any, then we don't need to worry about
+ // any connections.
+ dns, ok := destroyMap[key]
+ if !ok {
+ continue
+ }
+
+ // Get the nodes that depend on this one. In the example above:
+ // finding B in A => B.
+ for _, v := range g.UpEdges(v).List() {
+ // We're looking for resources.
+ rn, ok := v.(GraphNodeResource)
+ if !ok {
+ continue
+ }
+
+ // Keep track of the destroy nodes that this address
+ // needs to depend on.
+ key := rn.ResourceAddr().String()
+ depMap[key] = append(depMap[key], dns...)
+ }
+ }
+
+ return depMap, nil
+}
+
+// hasCBDAncestor returns true if any ancestor (node that depends on this)
+// has CBD set.
+func (t *CBDEdgeTransformer) hasCBDAncestor(g *Graph, v dag.Vertex) bool {
+ s, _ := g.Ancestors(v)
+ if s == nil {
+ return true
+ }
+
+ for _, v := range s.List() {
+ dn, ok := v.(GraphNodeDestroyerCBD)
+ if !ok {
+ continue
+ }
+
+ if dn.CreateBeforeDestroy() {
+ // some ancestor is CreateBeforeDestroy, so we need to follow suit
+ return true
+ }
+ }
+
+ return false
+}
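+
+// For illustration (hypothetical resources): if an aws_autoscaling_group
+// with create_before_destroy set depends on an aws_launch_configuration
+// without it, the launch configuration has a CBD ancestor here, so
+// CBDEdgeTransformer auto-upgrades it via ModifyCreateBeforeDestroy(true)
+// to avoid cycles.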
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
new file mode 100644
index 00000000..22be1ab6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
@@ -0,0 +1,269 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeDestroyer must be implemented by nodes that destroy resources.
+type GraphNodeDestroyer interface {
+ dag.Vertex
+
+ // DestroyAddr is the address of the resource that is being
+ // destroyed by this node. If this returns nil, then this node
+ // is not destroying anything.
+ DestroyAddr() *ResourceAddress
+}
+
+// GraphNodeCreator must be implemented by nodes that create OR update resources.
+type GraphNodeCreator interface {
+ // CreateAddr is the address of the resource being created or updated
+ CreateAddr() *ResourceAddress
+}
+
+// DestroyEdgeTransformer is a GraphTransformer that creates the proper
+// references for destroy resources. Destroy resources are more complex
+// in that they must depend on the destruction of resources that
+// in turn depend on the CREATION of the node being destroyed.
+//
+// That is complicated. Visually:
+//
+// B_d -> A_d -> A -> B
+//
+// Notice that A destroy depends on B destroy, while B create depends on
+// A create. They're inverted. This must be done for example because often
+// dependent resources will block parent resources from deleting. Concrete
+// example: VPC with subnets, the VPC can't be deleted while there are
+// still subnets.
+type DestroyEdgeTransformer struct {
+ // These are needed to properly build the graph of dependencies
+ // to determine what a destroy node depends on. Any of these can be nil.
+ Module *module.Tree
+ State *State
+}
+
+func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
+ log.Printf("[TRACE] DestroyEdgeTransformer: Beginning destroy edge transformation...")
+
+ // Build a map of what is being destroyed (by address string) to
+ // the list of destroyers. In general there will only be one destroyer
+ // but to make it more robust we support multiple.
+ destroyers := make(map[string][]GraphNodeDestroyer)
+ for _, v := range g.Vertices() {
+ dn, ok := v.(GraphNodeDestroyer)
+ if !ok {
+ continue
+ }
+
+ addr := dn.DestroyAddr()
+ if addr == nil {
+ continue
+ }
+
+ key := addr.String()
+ log.Printf(
+ "[TRACE] DestroyEdgeTransformer: %s destroying %q",
+ dag.VertexName(dn), key)
+ destroyers[key] = append(destroyers[key], dn)
+ }
+
+ // If we aren't destroying anything, there will be no edges to make
+ // so just exit early and avoid future work.
+ if len(destroyers) == 0 {
+ return nil
+ }
+
+ // Go through and connect creators to destroyers. Going along with
+ // our example, this makes: A_d => A
+ for _, v := range g.Vertices() {
+ cn, ok := v.(GraphNodeCreator)
+ if !ok {
+ continue
+ }
+
+ addr := cn.CreateAddr()
+ if addr == nil {
+ continue
+ }
+
+ key := addr.String()
+ ds := destroyers[key]
+ if len(ds) == 0 {
+ continue
+ }
+
+ for _, d := range ds {
+ // For illustrating our example
+ a_d := d.(dag.Vertex)
+ a := v
+
+ log.Printf(
+ "[TRACE] DestroyEdgeTransformer: connecting creator/destroyer: %s, %s",
+ dag.VertexName(a), dag.VertexName(a_d))
+
+ g.Connect(&DestroyEdge{S: a, T: a_d})
+ }
+ }
+
+ // This is strange but is the easiest way to get the dependencies
+ // of a node that is being destroyed. We use another graph to make sure
+ // the resource is in the graph and ask for references. We have to do this
+ // because the node that is being destroyed may NOT be in the graph.
+ //
+ // Example: resource A is force new, then destroy A AND create A are
+ // in the graph. BUT if resource A is just pure destroy, then only
+ // destroy A is in the graph, and create A is not.
+ providerFn := func(a *NodeAbstractProvider) dag.Vertex {
+ return &NodeApplyableProvider{NodeAbstractProvider: a}
+ }
+ steps := []GraphTransformer{
+ // Add outputs and metadata
+ &OutputTransformer{Module: t.Module},
+ &AttachResourceConfigTransformer{Module: t.Module},
+ &AttachStateTransformer{State: t.State},
+
+ // Add providers since they can affect destroy order as well
+ &MissingProviderTransformer{AllowAny: true, Concrete: providerFn},
+ &ProviderTransformer{},
+ &DisableProviderTransformer{},
+ &ParentProviderTransformer{},
+ &AttachProviderConfigTransformer{Module: t.Module},
+
+ // Add all the variables. We can depend on resources through
+ // variables due to module parameters, and we need to properly
+ // determine that.
+ &RootVariableTransformer{Module: t.Module},
+ &ModuleVariableTransformer{Module: t.Module},
+
+ &ReferenceTransformer{},
+ }
+
+ // Go through all the nodes being destroyed and create a graph.
+ // The resulting graph is only of things being CREATED. For example,
+ // following our example, the resulting graph would be:
+ //
+ // A, B (with no edges)
+ //
+ var tempG Graph
+ var tempDestroyed []dag.Vertex
+ for d := range destroyers {
+ // d is the address of what is being destroyed. We parse the
+ // resource address; it is a panic (a bug) if this fails.
+ addr, err := ParseResourceAddress(d)
+ if err != nil {
+ panic(err)
+ }
+
+ // This part is a little bit weird but is the best way to
+ // find the dependencies we need to: build a graph and use the
+ // attach config and state transformers then ask for references.
+ abstract := &NodeAbstractResource{Addr: addr}
+ tempG.Add(abstract)
+ tempDestroyed = append(tempDestroyed, abstract)
+
+ // We also add the destroy version here since the destroy can
+ // depend on things that the creation doesn't (destroy provisioners).
+ destroy := &NodeDestroyResource{NodeAbstractResource: abstract}
+ tempG.Add(destroy)
+ tempDestroyed = append(tempDestroyed, destroy)
+ }
+
+ // Run the graph transforms so we have the information we need to
+ // build references.
+ for _, s := range steps {
+ if err := s.Transform(&tempG); err != nil {
+ return err
+ }
+ }
+
+ log.Printf("[TRACE] DestroyEdgeTransformer: reference graph: %s", tempG.String())
+
+ // Go through all the nodes in the graph and determine what they
+ // depend on.
+ for _, v := range tempDestroyed {
+ // Find all ancestors of this to determine the edges we'll depend on
+ vs, err := tempG.Ancestors(v)
+ if err != nil {
+ return err
+ }
+
+ refs := make([]dag.Vertex, 0, vs.Len())
+ for _, raw := range vs.List() {
+ refs = append(refs, raw.(dag.Vertex))
+ }
+
+ refNames := make([]string, len(refs))
+ for i, ref := range refs {
+ refNames[i] = dag.VertexName(ref)
+ }
+ log.Printf(
+ "[TRACE] DestroyEdgeTransformer: creation node %q references %s",
+ dag.VertexName(v), refNames)
+
+ // If we have no references, then we won't need to do anything
+ if len(refs) == 0 {
+ continue
+ }
+
+ // Get the destroy node for this. In the example of our struct,
+ // we are currently at B and we're looking for B_d.
+ rn, ok := v.(GraphNodeResource)
+ if !ok {
+ continue
+ }
+
+ addr := rn.ResourceAddr()
+ if addr == nil {
+ continue
+ }
+
+ dns := destroyers[addr.String()]
+
+ // We have dependencies, check if any are being destroyed
+ // to build the list of things that we must depend on!
+ //
+ // In the example of the struct, if we have:
+ //
+ // B_d => A_d => A => B
+ //
+ // Then at this point in the algorithm we started with B_d,
+ // we built B (to get dependencies), and we found A. We're now looking
+ // to see if A_d exists.
+ var depDestroyers []dag.Vertex
+ for _, v := range refs {
+ rn, ok := v.(GraphNodeResource)
+ if !ok {
+ continue
+ }
+
+ addr := rn.ResourceAddr()
+ if addr == nil {
+ continue
+ }
+
+ key := addr.String()
+ if ds, ok := destroyers[key]; ok {
+ for _, d := range ds {
+ depDestroyers = append(depDestroyers, d.(dag.Vertex))
+ log.Printf(
+ "[TRACE] DestroyEdgeTransformer: destruction of %q depends on %s",
+ key, dag.VertexName(d))
+ }
+ }
+ }
+
+ // Go through and make the connections. Use the variable
+ // names "a_d" and "b_d" to reference our example.
+ for _, a_d := range dns {
+ for _, b_d := range depDestroyers {
+ if b_d != a_d {
+ g.Connect(dag.BasicEdge(b_d, a_d))
+ }
+ }
+ }
+ }
+
+ return nil
+}
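+
+// For illustration, using the VPC example above: if aws_subnet.a depends on
+// aws_vpc.main, creation orders the VPC before the subnet, while the
+// destroy edges built here order the subnet's destroy node before the
+// VPC's destroy node.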
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
new file mode 100644
index 00000000..ad46d3c6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
@@ -0,0 +1,86 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// DiffTransformer is a GraphTransformer that adds the elements of
+// the diff to the graph.
+//
+// This transform is used for example by the ApplyGraphBuilder to ensure
+// that only resources that are being modified are represented in the graph.
+//
+// Module and State are still required for the DiffTransformer for annotations
+// since the Diff doesn't contain all the information required to build the
+// complete graph (such as create-before-destroy information). The graph
+// is built based on the diff first, though, ensuring that only resources
+// that are being modified are present in the graph.
+type DiffTransformer struct {
+ Concrete ConcreteResourceNodeFunc
+
+ Diff *Diff
+ Module *module.Tree
+ State *State
+}
+
+func (t *DiffTransformer) Transform(g *Graph) error {
+ // If the diff is nil or empty (nil is empty) then do nothing
+ if t.Diff.Empty() {
+ return nil
+ }
+
+ // Go through all the modules in the diff.
+ log.Printf("[TRACE] DiffTransformer: starting")
+ var nodes []dag.Vertex
+ for _, m := range t.Diff.Modules {
+ log.Printf("[TRACE] DiffTransformer: Module: %s", m)
+ // TODO: If this is a destroy diff then add a module destroy node
+
+ // Go through all the resources in this module.
+ for name, inst := range m.Resources {
+ log.Printf("[TRACE] DiffTransformer: Resource %q: %#v", name, inst)
+
+ // We have changes! This is a create or update operation.
+ // First grab the address so we have a unique way to
+ // reference this resource.
+ addr, err := parseResourceAddressInternal(name)
+ if err != nil {
+ panic(fmt.Sprintf(
+ "Error parsing internal name, this is a bug: %q", name))
+ }
+
+ // Very important: add the module path for this resource to
+ // the address. Remove "root" from it.
+ addr.Path = m.Path[1:]
+
+ // If we're destroying, add the destroy node
+ if inst.Destroy || inst.GetDestroyDeposed() {
+ abstract := &NodeAbstractResource{Addr: addr}
+ g.Add(&NodeDestroyResource{NodeAbstractResource: abstract})
+ }
+
+ // If we have changes, then add the applyable version
+ if len(inst.Attributes) > 0 {
+ // Add the resource to the graph
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ nodes = append(nodes, node)
+ }
+ }
+ }
+
+ // Add all the nodes to the graph
+ for _, n := range nodes {
+ g.Add(n)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
new file mode 100644
index 00000000..982c098b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
@@ -0,0 +1,48 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeExpandable is an interface that nodes can implement to
+// signal that they can be expanded. Expanded nodes turn into
+// GraphNodeSubgraph nodes within the graph.
+type GraphNodeExpandable interface {
+ Expand(GraphBuilder) (GraphNodeSubgraph, error)
+}
+
+// GraphNodeDynamicExpandable is an interface that nodes can implement
+// to signal that they can be expanded at eval-time (hence dynamic).
+// These nodes are given the eval context and are expected to return
+// a new subgraph.
+type GraphNodeDynamicExpandable interface {
+ DynamicExpand(EvalContext) (*Graph, error)
+}
+
+// GraphNodeSubgraph is an interface a node can implement if it has
+// a larger subgraph that should be walked.
+type GraphNodeSubgraph interface {
+ Subgraph() dag.Grapher
+}
+
+// ExpandTransform is a transformer that does a subgraph expansion
+// at graph transform time (vs. at eval time). The benefit of earlier
+// subgraph expansion is that errors with the graph build can be detected
+// at an earlier stage.
+type ExpandTransform struct {
+ Builder GraphBuilder
+}
+
+func (t *ExpandTransform) Transform(v dag.Vertex) (dag.Vertex, error) {
+ ev, ok := v.(GraphNodeExpandable)
+ if !ok {
+ // This isn't an expandable vertex, so just ignore it.
+ return v, nil
+ }
+
+ // Expand the subgraph!
+ log.Printf("[DEBUG] vertex %q: static expanding", dag.VertexName(ev))
+ return ev.Expand(t.Builder)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
new file mode 100644
index 00000000..3673771c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
@@ -0,0 +1,38 @@
+package terraform
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ImportProviderValidateTransformer is a GraphTransformer that goes through
+// the providers in the graph and validates that they only depend on variables.
+type ImportProviderValidateTransformer struct{}
+
+func (t *ImportProviderValidateTransformer) Transform(g *Graph) error {
+ for _, v := range g.Vertices() {
+ // We only care about providers
+ pv, ok := v.(GraphNodeProvider)
+ if !ok {
+ continue
+ }
+
+ // We only care about providers that reference things
+ rn, ok := pv.(GraphNodeReferencer)
+ if !ok {
+ continue
+ }
+
+ for _, ref := range rn.References() {
+ if !strings.HasPrefix(ref, "var.") {
+ return fmt.Errorf(
+ "Provider %q depends on non-var %q. Providers for import can currently\n"+
+ "only depend on variables or must be hardcoded. You can stop import\n"+
+ "from loading configurations by specifying `-config=\"\"`.",
+ pv.ProviderName(), ref)
+ }
+ }
+ }
+
+ return nil
+}
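+
+// For illustration: a provider whose References() returns
+// []string{"var.region"} passes this validation, while one returning
+// []string{"aws_instance.web.id"} (hypothetical) triggers the error above.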
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
new file mode 100644
index 00000000..081df2f8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
@@ -0,0 +1,241 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// ImportStateTransformer is a GraphTransformer that adds nodes to the
+// graph to represent the imports we want to do for resources.
+type ImportStateTransformer struct {
+ Targets []*ImportTarget
+}
+
+func (t *ImportStateTransformer) Transform(g *Graph) error {
+ nodes := make([]*graphNodeImportState, 0, len(t.Targets))
+ for _, target := range t.Targets {
+ addr, err := ParseResourceAddress(target.Addr)
+ if err != nil {
+ return fmt.Errorf(
+ "failed to parse resource address '%s': %s",
+ target.Addr, err)
+ }
+
+ nodes = append(nodes, &graphNodeImportState{
+ Addr: addr,
+ ID: target.ID,
+ Provider: target.Provider,
+ })
+ }
+
+ // Build the graph vertices
+ for _, n := range nodes {
+ g.Add(n)
+ }
+
+ return nil
+}
+
+type graphNodeImportState struct {
+ Addr *ResourceAddress // Addr is the resource address to import to
+ ID string // ID is the ID to import as
+ Provider string // Provider is the optional provider name override
+
+ states []*InstanceState
+}
+
+func (n *graphNodeImportState) Name() string {
+ return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID)
+}
+
+func (n *graphNodeImportState) ProvidedBy() []string {
+ return []string{resourceProvider(n.Addr.Type, n.Provider)}
+}
+
+// GraphNodeSubPath
+func (n *graphNodeImportState) Path() []string {
+ return normalizeModulePath(n.Addr.Path)
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeImportState) EvalTree() EvalNode {
+ var provider ResourceProvider
+ info := &InstanceInfo{
+ Id: fmt.Sprintf("%s.%s", n.Addr.Type, n.Addr.Name),
+ ModulePath: n.Path(),
+ Type: n.Addr.Type,
+ }
+
+ // Reset our states
+ n.states = nil
+
+ // Return our sequence
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalImportState{
+ Provider: &provider,
+ Info: info,
+ Id: n.ID,
+ Output: &n.states,
+ },
+ },
+ }
+}
+
+// GraphNodeDynamicExpandable impl.
+//
+// We use DynamicExpand as a way to generate the subgraph of refreshes
+// and state inserts we need to do for our import state. Since they're new
+// resources, they don't depend on anything else, and refreshes are isolated,
+// so this is nearly a perfect use case for dynamic expand.
+func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
+ g := &Graph{Path: ctx.Path()}
+
+ // nameCounter is used to de-dup names in the state.
+ nameCounter := make(map[string]int)
+
+ // Compile the list of addresses that we'll be inserting into the state.
+ // We do this ahead of time so we can verify that we aren't importing
+ // something that already exists.
+ addrs := make([]*ResourceAddress, len(n.states))
+ for i, state := range n.states {
+ addr := *n.Addr
+ if t := state.Ephemeral.Type; t != "" {
+ addr.Type = t
+ }
+
+ // Determine if we need to suffix the name to de-dup
+ key := addr.String()
+ count, ok := nameCounter[key]
+ if ok {
+ count++
+ addr.Name += fmt.Sprintf("-%d", count)
+ }
+ nameCounter[key] = count
+
+ // Add it to our list
+ addrs[i] = &addr
+ }
+
+ // Verify that all the addresses are clear
+ state, lock := ctx.State()
+ lock.RLock()
+ defer lock.RUnlock()
+ filter := &StateFilter{State: state}
+ for _, addr := range addrs {
+ result, err := filter.Filter(addr.String())
+ if err != nil {
+ return nil, fmt.Errorf("Error verifying address %s: %s", addr, err)
+ }
+
+ // Go through the filter results and it is an error if we find
+ // a matching InstanceState, meaning that we would have a collision.
+ for _, r := range result {
+ if _, ok := r.Value.(*InstanceState); ok {
+ return nil, fmt.Errorf(
+ "Can't import %s, would collide with an existing resource.\n\n"+
+ "Please remove or rename this resource before continuing.",
+ addr)
+ }
+ }
+ }
+
+ // For each of the states, we add a node to handle the refresh/add to state.
+ // "n.states" is populated by our own EvalTree with the result of
+ // ImportState. Since DynamicExpand is always called after EvalTree, this
+ // is safe.
+ for i, state := range n.states {
+ g.Add(&graphNodeImportStateSub{
+ Target: addrs[i],
+ Path_: n.Path(),
+ State: state,
+ Provider: n.Provider,
+ })
+ }
+
+ // Root transform for a single root
+ t := &RootTransformer{}
+ if err := t.Transform(g); err != nil {
+ return nil, err
+ }
+
+ // Done!
+ return g, nil
+}
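+
+// For illustration: if an import returns two states for the hypothetical
+// address "aws_instance.web", the nameCounter de-duplication above inserts
+// them as "aws_instance.web" and "aws_instance.web-1".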
+
+// graphNodeImportStateSub is the sub-node of graphNodeImportState
+// and is part of the subgraph. This node is responsible for refreshing
+// and adding a resource to the state once it is imported.
+type graphNodeImportStateSub struct {
+ Target *ResourceAddress
+ State *InstanceState
+ Path_ []string
+ Provider string
+}
+
+func (n *graphNodeImportStateSub) Name() string {
+ return fmt.Sprintf("import %s result: %s", n.Target, n.State.ID)
+}
+
+func (n *graphNodeImportStateSub) Path() []string {
+ return n.Path_
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeImportStateSub) EvalTree() EvalNode {
+ // If the Ephemeral type isn't set, then it is an error
+ if n.State.Ephemeral.Type == "" {
+ err := fmt.Errorf(
+ "import of %s didn't set type for %s",
+ n.Target.String(), n.State.ID)
+ return &EvalReturnError{Error: &err}
+ }
+
+ // DeepCopy so we're only modifying our local copy
+ state := n.State.DeepCopy()
+
+ // Build the resource info
+ info := &InstanceInfo{
+ Id: fmt.Sprintf("%s.%s", n.Target.Type, n.Target.Name),
+ ModulePath: n.Path_,
+ Type: n.State.Ephemeral.Type,
+ }
+
+ // Key is the resource key
+ key := &ResourceStateKey{
+ Name: n.Target.Name,
+ Type: info.Type,
+ Index: n.Target.Index,
+ }
+
+ // The eval sequence
+ var provider ResourceProvider
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: resourceProvider(info.Type, n.Provider),
+ Output: &provider,
+ },
+ &EvalRefresh{
+ Provider: &provider,
+ State: &state,
+ Info: info,
+ Output: &state,
+ },
+ &EvalImportStateVerify{
+ Info: info,
+ Id: n.State.ID,
+ State: &state,
+ },
+ &EvalWriteState{
+ Name: key.String(),
+ ResourceType: info.Type,
+ Provider: resourceProvider(info.Type, n.Provider),
+ State: &state,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
new file mode 100644
index 00000000..467950bd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
@@ -0,0 +1,120 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ModuleVariableTransformer is a GraphTransformer that adds all the variables
+// in the configuration to the graph.
+//
+// This only adds variables that are referenced by other things in the graph.
+// If a module variable is not referenced, it won't be added to the graph.
+type ModuleVariableTransformer struct {
+ Module *module.Tree
+
+ DisablePrune bool // True if pruning of unreferenced variables should be disabled
+}
+
+func (t *ModuleVariableTransformer) Transform(g *Graph) error {
+ return t.transform(g, nil, t.Module)
+}
+
+func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree) error {
+ // If no config, no variables
+ if m == nil {
+ return nil
+ }
+
+ // Transform all the children first. This must be done BEFORE transforming
+ // this module itself, since child module variables can reference parent
+ // module variables.
+ for _, c := range m.Children() {
+ if err := t.transform(g, m, c); err != nil {
+ return err
+ }
+ }
+
+ // If we have a parent, we can determine if a module variable is being
+ // used, so we transform this.
+ if parent != nil {
+ if err := t.transformSingle(g, parent, m); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, m *module.Tree) error {
+ // If we have no vars, we're done!
+ vars := m.Config().Variables
+ if len(vars) == 0 {
+ log.Printf("[TRACE] Module %#v has no variables, skipping.", m.Path())
+ return nil
+ }
+
+ // Look for usage of this module
+ var mod *config.Module
+ for _, modUse := range parent.Config().Modules {
+ if modUse.Name == m.Name() {
+ mod = modUse
+ break
+ }
+ }
+ if mod == nil {
+ log.Printf("[INFO] Module %#v not used, not adding variables", m.Path())
+ return nil
+ }
+
+ // Build the reference map so we can determine if we're referencing things.
+ refMap := NewReferenceMap(g.Vertices())
+
+ // Add all variables here
+ for _, v := range vars {
+ // Determine the value of the variable. If it isn't in the
+ // configuration then it was never set and that's not a problem.
+ var value *config.RawConfig
+ if raw, ok := mod.RawConfig.Raw[v.Name]; ok {
+ var err error
+ value, err = config.NewRawConfig(map[string]interface{}{
+ v.Name: raw,
+ })
+ if err != nil {
+ // This shouldn't happen because it is already in
+ // a RawConfig above meaning it worked once before.
+ panic(err)
+ }
+ }
+
+ // Build the node.
+ //
+ // NOTE: For now this is just an "applyable" variable. As we build
+ // new graph builders for the other operations I suspect we'll
+ // find a way to parameterize this, require new transforms, etc.
+ node := &NodeApplyableModuleVariable{
+ PathValue: normalizeModulePath(m.Path()),
+ Config: v,
+ Value: value,
+ Module: t.Module,
+ }
+
+ if !t.DisablePrune {
+ // If the node is not referenced by anything, then we don't need
+ // to include it since it won't be used.
+ if matches := refMap.ReferencedBy(node); len(matches) == 0 {
+ log.Printf(
+ "[INFO] Not including %q in graph, nothing depends on it",
+ dag.VertexName(node))
+ continue
+ }
+ }
+
+ // Add it!
+ g.Add(node)
+ }
+
+ return nil
+}
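
The pruning rule in transformSingle only admits a module variable node when some vertex already in the graph references it. Here is a small self-contained sketch of that rule; the names and the referencedBy map are hypothetical, standing in for the real ReferenceMap.

package main

import "fmt"

func main() {
	// referencedBy maps a referenceable name to the vertices using it.
	referencedBy := map[string][]string{
		"module.vpc.var.cidr": {"module.vpc.aws_vpc.main"},
	}

	candidates := []string{"module.vpc.var.cidr", "module.vpc.var.unused"}
	var added []string
	for _, v := range candidates {
		// Skip variables that nothing depends on.
		if len(referencedBy[v]) == 0 {
			fmt.Printf("not including %q in graph, nothing depends on it\n", v)
			continue
		}
		added = append(added, v)
	}
	fmt.Println(added) // [module.vpc.var.cidr]
}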
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
new file mode 100644
index 00000000..b256a25b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
@@ -0,0 +1,110 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// OrphanResourceCountTransformer is a GraphTransformer that adds orphans
+// for an expanded count to the graph. The determination of this depends
+// on the count argument given.
+//
+// Orphans are found by comparing the count to what is found in the state.
+// This transform assumes that if an element in the state is within the count
+// bounds given, that it is not an orphan.
+type OrphanResourceCountTransformer struct {
+ Concrete ConcreteResourceNodeFunc
+
+ Count int // Actual count of the resource
+ Addr *ResourceAddress // Addr of the resource to look for orphans
+ State *State // Full global state
+}
+
+func (t *OrphanResourceCountTransformer) Transform(g *Graph) error {
+ log.Printf("[TRACE] OrphanResourceCount: Starting...")
+
+ // Grab the module in the state just for this resource address
+ ms := t.State.ModuleByPath(normalizeModulePath(t.Addr.Path))
+ if ms == nil {
+ // If no state, there can't be orphans
+ return nil
+ }
+
+ orphanIndex := -1
+ if t.Count == 1 {
+ orphanIndex = 0
+ }
+
+	// Go through the resources in the state and add any orphans to the graph
+	for key := range ms.Resources {
+ // Build the address
+ addr, err := parseResourceAddressInternal(key)
+ if err != nil {
+ return err
+ }
+ addr.Path = ms.Path[1:]
+
+ // Copy the address for comparison. If we aren't looking at
+ // the same resource, then just ignore it.
+ addrCopy := addr.Copy()
+ addrCopy.Index = -1
+ if !addrCopy.Equals(t.Addr) {
+ continue
+ }
+
+ log.Printf("[TRACE] OrphanResourceCount: Checking: %s", addr)
+
+ idx := addr.Index
+
+		// If the count is zero and the index here is -1 or 0, then we
+		// change the index to a high number so that we treat it as
+		// an orphan.
+ if t.Count <= 0 && idx <= 0 {
+ idx = t.Count + 1
+ }
+
+		// If the count is positive and this is the candidate index
+		// (0 when the count is 1, -1 otherwise), do a special case
+		// check to see if the state also holds the counterpart form.
+		// If so, this entry is an orphan: when both a -1 and a 0 are
+		// in the state, only one of them may survive.
+ if t.Count > 0 && idx == orphanIndex {
+			// This is a piece of cleverness (beware), but it's simple:
+ // if orphanIndex is 0, then check -1, else check 0.
+ checkIndex := (orphanIndex + 1) * -1
+
+ key := &ResourceStateKey{
+ Name: addr.Name,
+ Type: addr.Type,
+ Mode: addr.Mode,
+ Index: checkIndex,
+ }
+
+ if _, ok := ms.Resources[key.String()]; ok {
+				// The counterpart form is in the state, too. Use an
+				// arbitrarily high index so this entry is always an orphan.
+ log.Printf(
+ "[WARN] OrphanResourceCount: %q both -1 and 0 index found, orphaning %d",
+ addr, orphanIndex)
+ idx = t.Count + 1
+ }
+ }
+
+ // If the index is within the count bounds, it is not an orphan
+ if idx < t.Count {
+ continue
+ }
+
+ // Build the abstract node and the concrete one
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ // Add it to the graph
+ g.Add(node)
+ }
+
+ return nil
+}
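
The index arithmetic above is easy to misread, so here is a runnable sketch of the same orphan-by-count rules on plain ints (hypothetical data, not Terraform's state types): indices at or beyond the count are orphans, a count of zero orphans the -1 and 0 forms outright, and when both the -1 and 0 forms exist one of them is orphaned.

package main

import "fmt"

// orphans mirrors the rules above; an index of -1 marks the single
// un-indexed instance.
func orphans(count int, indices []int) []int {
	present := map[int]bool{}
	for _, i := range indices {
		present[i] = true
	}

	// The index that may collide with its counterpart form.
	orphanIndex := -1
	if count == 1 {
		orphanIndex = 0
	}

	var result []int
	for _, i := range indices {
		idx := i
		// A count of zero orphans the -1 and 0 forms outright.
		if count <= 0 && idx <= 0 {
			idx = count + 1
		}
		// If both the -1 and 0 forms exist, orphan this one.
		if count > 0 && idx == orphanIndex && present[(orphanIndex+1)*-1] {
			idx = count + 1
		}
		// Anything at or beyond the count is an orphan.
		if idx >= count {
			result = append(result, i)
		}
	}
	return result
}

func main() {
	fmt.Println(orphans(1, []int{-1, 0}))      // [0]
	fmt.Println(orphans(3, []int{0, 1, 2, 3})) // [3]
	fmt.Println(orphans(0, []int{-1}))         // [-1]
}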
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
new file mode 100644
index 00000000..49568d5b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
@@ -0,0 +1,64 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// OrphanOutputTransformer finds the outputs that aren't present
+// in the given config that are in the state and adds them to the graph
+// for deletion.
+type OrphanOutputTransformer struct {
+ Module *module.Tree // Root module
+ State *State // State is the root state
+}
+
+func (t *OrphanOutputTransformer) Transform(g *Graph) error {
+ if t.State == nil {
+ log.Printf("[DEBUG] No state, no orphan outputs")
+ return nil
+ }
+
+ return t.transform(g, t.Module)
+}
+
+func (t *OrphanOutputTransformer) transform(g *Graph, m *module.Tree) error {
+ // Get our configuration, and recurse into children
+ var c *config.Config
+ if m != nil {
+ c = m.Config()
+ for _, child := range m.Children() {
+ if err := t.transform(g, child); err != nil {
+ return err
+ }
+ }
+ }
+
+	// If the module tree is nil there is no configuration at all, so
+	// there is nothing to do here (and m.Path() below would panic).
+	if m == nil {
+		return nil
+	}
+
+	// Get the state. If there is no state, then we have no orphans!
+	path := normalizeModulePath(m.Path())
+ state := t.State.ModuleByPath(path)
+ if state == nil {
+ return nil
+ }
+
+ // Make a map of the valid outputs
+ valid := make(map[string]struct{})
+ for _, o := range c.Outputs {
+ valid[o.Name] = struct{}{}
+ }
+
+ // Go through the outputs and find the ones that aren't in our config.
+	for n := range state.Outputs {
+ // If it is in the valid map, then ignore
+ if _, ok := valid[n]; ok {
+ continue
+ }
+
+ // Orphan!
+ g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path})
+ }
+
+ return nil
+}
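
The orphan detection here is a plain set difference: outputs present in state but absent from configuration are deleted. A tiny runnable sketch with hypothetical output names:

package main

import "fmt"

// orphanOutputs returns the state output names with no config entry.
func orphanOutputs(configured, inState []string) []string {
	valid := make(map[string]struct{}, len(configured))
	for _, name := range configured {
		valid[name] = struct{}{}
	}
	var orphans []string
	for _, name := range inState {
		if _, ok := valid[name]; !ok {
			orphans = append(orphans, name)
		}
	}
	return orphans
}

func main() {
	configured := []string{"vpc_id", "subnet_id"}
	inState := []string{"vpc_id", "subnet_id", "old_dns_name"}
	fmt.Println(orphanOutputs(configured, inState)) // [old_dns_name]
}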
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
new file mode 100644
index 00000000..e42d3c84
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
@@ -0,0 +1,78 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// OrphanResourceTransformer is a GraphTransformer that adds resource
+// orphans to the graph. A resource orphan is a resource that is
+// represented in the state but not in the configuration.
+//
+// This only adds orphans that have no representation at all in the
+// configuration.
+type OrphanResourceTransformer struct {
+ Concrete ConcreteResourceNodeFunc
+
+ // State is the global state. We require the global state to
+ // properly find module orphans at our path.
+ State *State
+
+ // Module is the root module. We'll look up the proper configuration
+ // using the graph path.
+ Module *module.Tree
+}
+
+func (t *OrphanResourceTransformer) Transform(g *Graph) error {
+ if t.State == nil {
+ // If the entire state is nil, there can't be any orphans
+ return nil
+ }
+
+ // Go through the modules and for each module transform in order
+ // to add the orphan.
+ for _, ms := range t.State.Modules {
+ if err := t.transform(g, ms); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (t *OrphanResourceTransformer) transform(g *Graph, ms *ModuleState) error {
+ if ms == nil {
+ return nil
+ }
+
+ // Get the configuration for this path. The configuration might be
+ // nil if the module was removed from the configuration. This is okay,
+ // this just means that every resource is an orphan.
+ var c *config.Config
+ if m := t.Module.Child(ms.Path[1:]); m != nil {
+ c = m.Config()
+ }
+
+	// Go through the orphans and add them all to the graph
+ for _, key := range ms.Orphans(c) {
+ // Build the abstract resource
+ addr, err := parseResourceAddressInternal(key)
+ if err != nil {
+ return err
+ }
+ addr.Path = ms.Path[1:]
+
+ // Build the abstract node and the concrete one
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ // Add it to the graph
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
new file mode 100644
index 00000000..b260f4ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
@@ -0,0 +1,59 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// OutputTransformer is a GraphTransformer that adds all the outputs
+// in the configuration to the graph.
+//
+// This is done for the apply graph builder even if dependent nodes
+// aren't changing since there is no downside: the state will be available
+// even if the dependent items aren't changing.
+type OutputTransformer struct {
+ Module *module.Tree
+}
+
+func (t *OutputTransformer) Transform(g *Graph) error {
+ return t.transform(g, t.Module)
+}
+
+func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error {
+ // If no config, no outputs
+ if m == nil {
+ return nil
+ }
+
+ // Transform all the children. We must do this first because
+ // we can reference module outputs and they must show up in the
+ // reference map.
+ for _, c := range m.Children() {
+ if err := t.transform(g, c); err != nil {
+ return err
+ }
+ }
+
+ // If we have no outputs, we're done!
+ os := m.Config().Outputs
+ if len(os) == 0 {
+ return nil
+ }
+
+ // Add all outputs here
+ for _, o := range os {
+ // Build the node.
+ //
+ // NOTE: For now this is just an "applyable" output. As we build
+ // new graph builders for the other operations I suspect we'll
+ // find a way to parameterize this, require new transforms, etc.
+ node := &NodeApplyableOutput{
+ PathValue: normalizeModulePath(m.Path()),
+ Config: o,
+ }
+
+ // Add it!
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
new file mode 100644
index 00000000..b9695d52
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
@@ -0,0 +1,380 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeProvider is an interface that nodes that can be a provider
+// must implement. The ProviderName returned is the name of the provider
+// they satisfy.
+type GraphNodeProvider interface {
+ ProviderName() string
+}
+
+// GraphNodeCloseProvider is an interface that nodes that can be a close
+// provider must implement. The CloseProviderName returned is the name of
+// the provider they satisfy.
+type GraphNodeCloseProvider interface {
+ CloseProviderName() string
+}
+
+// GraphNodeProviderConsumer is an interface that nodes that require
+// a provider must implement. ProvidedBy must return the name of the provider
+// to use.
+type GraphNodeProviderConsumer interface {
+ ProvidedBy() []string
+}
+
+// ProviderTransformer is a GraphTransformer that maps resources to
+// providers within the graph. This will error if there are any resources
+// whose required provider can't be found in the graph.
+type ProviderTransformer struct{}
+
+func (t *ProviderTransformer) Transform(g *Graph) error {
+ // Go through the other nodes and match them to providers they need
+ var err error
+ m := providerVertexMap(g)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProviderConsumer); ok {
+ for _, p := range pv.ProvidedBy() {
+ target := m[providerMapKey(p, pv)]
+ if target == nil {
+ err = multierror.Append(err, fmt.Errorf(
+ "%s: provider %s couldn't be found",
+ dag.VertexName(v), p))
+ continue
+ }
+
+ g.Connect(dag.BasicEdge(v, target))
+ }
+ }
+ }
+
+ return err
+}
+
+// CloseProviderTransformer is a GraphTransformer that adds nodes to the
+// graph that will close open provider connections that aren't needed anymore.
+// A provider connection is no longer needed once every resource depending
+// on that provider has been evaluated.
+type CloseProviderTransformer struct{}
+
+func (t *CloseProviderTransformer) Transform(g *Graph) error {
+ pm := providerVertexMap(g)
+ cpm := closeProviderVertexMap(g)
+ var err error
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProviderConsumer); ok {
+ for _, p := range pv.ProvidedBy() {
+ key := p
+ source := cpm[key]
+
+ if source == nil {
+ // Create a new graphNodeCloseProvider and add it to the graph
+ source = &graphNodeCloseProvider{ProviderNameValue: p}
+ g.Add(source)
+
+ // Close node needs to depend on provider
+ provider, ok := pm[key]
+ if !ok {
+ err = multierror.Append(err, fmt.Errorf(
+ "%s: provider %s couldn't be found for closing",
+ dag.VertexName(v), p))
+ continue
+ }
+ g.Connect(dag.BasicEdge(source, provider))
+
+ // Make sure we also add the new graphNodeCloseProvider to the map
+ // so we don't create and add any duplicate graphNodeCloseProviders.
+ cpm[key] = source
+ }
+
+ // Close node depends on all nodes provided by the provider
+ g.Connect(dag.BasicEdge(source, v))
+ }
+ }
+ }
+
+ return err
+}
+
+// MissingProviderTransformer is a GraphTransformer that adds nodes
+// for missing providers into the graph. Specifically, it creates provider
+// configuration nodes for all the providers that we support. These are
+// pruned later during an optimization pass.
+type MissingProviderTransformer struct {
+ // Providers is the list of providers we support.
+ Providers []string
+
+ // AllowAny will not check that a provider is supported before adding
+ // it to the graph.
+ AllowAny bool
+
+ // Concrete, if set, overrides how the providers are made.
+ Concrete ConcreteProviderNodeFunc
+}
+
+func (t *MissingProviderTransformer) Transform(g *Graph) error {
+ // Initialize factory
+ if t.Concrete == nil {
+ t.Concrete = func(a *NodeAbstractProvider) dag.Vertex {
+ return a
+ }
+ }
+
+ // Create a set of our supported providers
+ supported := make(map[string]struct{}, len(t.Providers))
+ for _, v := range t.Providers {
+ supported[v] = struct{}{}
+ }
+
+ // Get the map of providers we already have in our graph
+ m := providerVertexMap(g)
+
+	// Go through all the provider consumers and make sure we add
+	// that provider if it is missing. We use an index-based loop here
+	// instead of "range" because we append to check as we go, and the
+	// appended elements must be visited too.
+ check := g.Vertices()
+ for i := 0; i < len(check); i++ {
+ v := check[i]
+
+ pv, ok := v.(GraphNodeProviderConsumer)
+ if !ok {
+ continue
+ }
+
+ // If this node has a subpath, then we use that as a prefix
+ // into our map to check for an existing provider.
+ var path []string
+ if sp, ok := pv.(GraphNodeSubPath); ok {
+ raw := normalizeModulePath(sp.Path())
+ if len(raw) > len(rootModulePath) {
+ path = raw
+ }
+ }
+
+ for _, p := range pv.ProvidedBy() {
+ key := providerMapKey(p, pv)
+ if _, ok := m[key]; ok {
+ // This provider already exists as a configure node
+ continue
+ }
+
+ // If the provider has an alias in it, we just want the type
+ ptype := p
+ if idx := strings.IndexRune(p, '.'); idx != -1 {
+ ptype = p[:idx]
+ }
+
+ if !t.AllowAny {
+ if _, ok := supported[ptype]; !ok {
+ // If we don't support the provider type, skip it.
+ // Validation later will catch this as an error.
+ continue
+ }
+ }
+
+ // Add the missing provider node to the graph
+ v := t.Concrete(&NodeAbstractProvider{
+ NameValue: p,
+ PathValue: path,
+ }).(dag.Vertex)
+ if len(path) > 0 {
+ // We'll need the parent provider as well, so let's
+ // add a dummy node to check to make sure that we add
+ // that parent provider.
+ check = append(check, &graphNodeProviderConsumerDummy{
+ ProviderValue: p,
+ PathValue: path[:len(path)-1],
+ })
+ }
+
+ m[key] = g.Add(v)
+ }
+ }
+
+ return nil
+}
+
+// ParentProviderTransformer connects provider nodes to their parents.
+//
+// This works by finding nodes that are both GraphNodeProviders and
+// GraphNodeSubPath. It then connects the providers to their parent
+// path.
+type ParentProviderTransformer struct{}
+
+func (t *ParentProviderTransformer) Transform(g *Graph) error {
+ // Make a mapping of path to dag.Vertex, where path is: "path.name"
+ m := make(map[string]dag.Vertex)
+
+ // Also create a map that maps a provider to its parent
+ parentMap := make(map[dag.Vertex]string)
+ for _, raw := range g.Vertices() {
+		// Historically a "flat" node type was converted to its non-flat
+		// form here; this indirection remains as a stop-gap until the
+		// flat version is gone entirely.
+ var v dag.Vertex = raw
+
+ // Only care about providers
+ pn, ok := v.(GraphNodeProvider)
+ if !ok || pn.ProviderName() == "" {
+ continue
+ }
+
+ // Also require a subpath, if there is no subpath then we
+ // just totally ignore it. The expectation of this transform is
+ // that it is used with a graph builder that is already flattened.
+ var path []string
+ if pn, ok := raw.(GraphNodeSubPath); ok {
+ path = pn.Path()
+ }
+ path = normalizeModulePath(path)
+
+ // Build the key with path.name i.e. "child.subchild.aws"
+		// Build the key as path.name, i.e. "root.child.subchild.aws"
+ m[key] = raw
+
+		// Determine the parent if we're non-root. We compare against
+		// length 1 since index 0 is always "root" after normalizing above.
+ if len(path) > 1 {
+ path = path[:len(path)-1]
+ key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
+ parentMap[raw] = key
+ }
+ }
+
+ // Connect!
+ for v, key := range parentMap {
+ if parent, ok := m[key]; ok {
+ g.Connect(dag.BasicEdge(v, parent))
+ }
+ }
+
+ return nil
+}
+
+// PruneProviderTransformer is a GraphTransformer that prunes all the
+// providers that aren't needed from the graph. A provider is unneeded if
+// no resource or module is using that provider.
+type PruneProviderTransformer struct{}
+
+func (t *PruneProviderTransformer) Transform(g *Graph) error {
+ for _, v := range g.Vertices() {
+ // We only care about the providers
+ if pn, ok := v.(GraphNodeProvider); !ok || pn.ProviderName() == "" {
+ continue
+ }
+ // Does anything depend on this? If not, then prune it.
+ if s := g.UpEdges(v); s.Len() == 0 {
+ if nv, ok := v.(dag.NamedVertex); ok {
+ log.Printf("[DEBUG] Pruning provider with no dependencies: %s", nv.Name())
+ }
+ g.Remove(v)
+ }
+ }
+
+ return nil
+}
+
+// providerMapKey is a helper that gives us the key to use for the
+// maps returned by things such as providerVertexMap.
+func providerMapKey(k string, v dag.Vertex) string {
+ pathPrefix := ""
+ if sp, ok := v.(GraphNodeSubPath); ok {
+ raw := normalizeModulePath(sp.Path())
+ if len(raw) > len(rootModulePath) {
+ pathPrefix = modulePrefixStr(raw) + "."
+ }
+ }
+
+ return pathPrefix + k
+}
+
+func providerVertexMap(g *Graph) map[string]dag.Vertex {
+ m := make(map[string]dag.Vertex)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProvider); ok {
+ key := providerMapKey(pv.ProviderName(), v)
+ m[key] = v
+ }
+ }
+
+ return m
+}
+
+func closeProviderVertexMap(g *Graph) map[string]dag.Vertex {
+ m := make(map[string]dag.Vertex)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeCloseProvider); ok {
+ m[pv.CloseProviderName()] = v
+ }
+ }
+
+ return m
+}
+
+type graphNodeCloseProvider struct {
+ ProviderNameValue string
+}
+
+func (n *graphNodeCloseProvider) Name() string {
+ return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue)
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeCloseProvider) EvalTree() EvalNode {
+ return CloseProviderEvalTree(n.ProviderNameValue)
+}
+
+// GraphNodeDependable impl.
+func (n *graphNodeCloseProvider) DependableName() []string {
+ return []string{n.Name()}
+}
+
+func (n *graphNodeCloseProvider) CloseProviderName() string {
+ return n.ProviderNameValue
+}
+
+// GraphNodeDotter impl.
+func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+ if !opts.Verbose {
+ return nil
+ }
+ return &dag.DotNode{
+ Name: name,
+ Attrs: map[string]string{
+ "label": n.Name(),
+ "shape": "diamond",
+ },
+ }
+}
+
+// RemovableIfNotTargeted
+func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool {
+ // We need to add this so that this node will be removed if
+ // it isn't targeted or a dependency of a target.
+ return true
+}
+
+// graphNodeProviderConsumerDummy is a struct that never enters the real
+// graph (though it could to no ill effect). It implements
+// GraphNodeProviderConsumer and GraphNodeSubPath as a way to force
+// certain transformations.
+type graphNodeProviderConsumerDummy struct {
+ ProviderValue string
+ PathValue []string
+}
+
+func (n *graphNodeProviderConsumerDummy) Path() []string {
+ return n.PathValue
+}
+
+func (n *graphNodeProviderConsumerDummy) ProvidedBy() []string {
+ return []string{n.ProviderValue}
+}
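
providerMapKey and modulePrefixStr together namespace provider lookups by module path. Below is a standalone sketch of that key scheme; the paths are hypothetical and, as in the code above, the implicit first path element is "root".

package main

import (
	"fmt"
	"strings"
)

// modulePrefix mirrors the idea of modulePrefixStr: every path element
// below the implicit "root" becomes a "module.<name>" segment.
func modulePrefix(path []string) string {
	parts := make([]string, 0, len(path)*2)
	for _, p := range path[1:] {
		parts = append(parts, "module", p)
	}
	return strings.Join(parts, ".")
}

// providerKey prefixes non-root providers with their module path so
// same-named providers in different modules get distinct map keys.
func providerKey(provider string, path []string) string {
	if len(path) > 1 { // anything longer than just ["root"]
		return modulePrefix(path) + "." + provider
	}
	return provider
}

func main() {
	fmt.Println(providerKey("aws", []string{"root"}))                  // aws
	fmt.Println(providerKey("aws", []string{"root", "vpc"}))           // module.vpc.aws
	fmt.Println(providerKey("aws.east", []string{"root", "vpc", "a"})) // module.vpc.module.a.aws.east
}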
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go
new file mode 100644
index 00000000..d9919f3a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go
@@ -0,0 +1,50 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// DisableProviderTransformer "disables" any providers that are not actually
+// used by anything. This avoids the provider being initialized and configured.
+// This both saves resources and avoids errors, since configuration may
+// imply initialization which may in turn require auth.
+type DisableProviderTransformer struct{}
+
+func (t *DisableProviderTransformer) Transform(g *Graph) error {
+ for _, v := range g.Vertices() {
+ // We only care about providers
+ pn, ok := v.(GraphNodeProvider)
+ if !ok || pn.ProviderName() == "" {
+ continue
+ }
+
+ // If we have dependencies, then don't disable
+ if g.UpEdges(v).Len() > 0 {
+ continue
+ }
+
+ // Get the path
+ var path []string
+ if pn, ok := v.(GraphNodeSubPath); ok {
+ path = pn.Path()
+ }
+
+ // Disable the provider by replacing it with a "disabled" provider
+ disabled := &NodeDisabledProvider{
+ NodeAbstractProvider: &NodeAbstractProvider{
+ NameValue: pn.ProviderName(),
+ PathValue: path,
+ },
+ }
+
+ if !g.Replace(v, disabled) {
+ panic(fmt.Sprintf(
+ "vertex disappeared from under us: %s",
+ dag.VertexName(v)))
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
new file mode 100644
index 00000000..f49d8241
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
@@ -0,0 +1,206 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeProvisioner is an interface that nodes that can be a provisioner
+// must implement. The ProvisionerName returned is the name of the provisioner
+// they satisfy.
+type GraphNodeProvisioner interface {
+ ProvisionerName() string
+}
+
+// GraphNodeCloseProvisioner is an interface that nodes that can be a close
+// provisioner must implement. The CloseProvisionerName returned is the name
+// of the provisioner they satisfy.
+type GraphNodeCloseProvisioner interface {
+ CloseProvisionerName() string
+}
+
+// GraphNodeProvisionerConsumer is an interface that nodes that require
+// a provisioner must implement. ProvisionedBy must return the name of the
+// provisioner to use.
+type GraphNodeProvisionerConsumer interface {
+ ProvisionedBy() []string
+}
+
+// ProvisionerTransformer is a GraphTransformer that maps resources to
+// provisioners within the graph. This will error if there are any resources
+// whose required provisioner can't be found in the graph.
+type ProvisionerTransformer struct{}
+
+func (t *ProvisionerTransformer) Transform(g *Graph) error {
+ // Go through the other nodes and match them to provisioners they need
+ var err error
+ m := provisionerVertexMap(g)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProvisionerConsumer); ok {
+ for _, p := range pv.ProvisionedBy() {
+ key := provisionerMapKey(p, pv)
+ if m[key] == nil {
+ err = multierror.Append(err, fmt.Errorf(
+ "%s: provisioner %s couldn't be found",
+ dag.VertexName(v), p))
+ continue
+ }
+
+ g.Connect(dag.BasicEdge(v, m[key]))
+ }
+ }
+ }
+
+ return err
+}
+
+// MissingProvisionerTransformer is a GraphTransformer that adds nodes
+// for missing provisioners into the graph.
+type MissingProvisionerTransformer struct {
+ // Provisioners is the list of provisioners we support.
+ Provisioners []string
+}
+
+func (t *MissingProvisionerTransformer) Transform(g *Graph) error {
+ // Create a set of our supported provisioners
+ supported := make(map[string]struct{}, len(t.Provisioners))
+ for _, v := range t.Provisioners {
+ supported[v] = struct{}{}
+ }
+
+ // Get the map of provisioners we already have in our graph
+ m := provisionerVertexMap(g)
+
+ // Go through all the provisioner consumers and make sure we add
+ // that provisioner if it is missing.
+ for _, v := range g.Vertices() {
+ pv, ok := v.(GraphNodeProvisionerConsumer)
+ if !ok {
+ continue
+ }
+
+ // If this node has a subpath, then we use that as a prefix
+		// into our map to check for an existing provisioner.
+ var path []string
+ if sp, ok := pv.(GraphNodeSubPath); ok {
+ raw := normalizeModulePath(sp.Path())
+ if len(raw) > len(rootModulePath) {
+ path = raw
+ }
+ }
+
+ for _, p := range pv.ProvisionedBy() {
+ // Build the key for storing in the map
+ key := provisionerMapKey(p, pv)
+
+ if _, ok := m[key]; ok {
+ // This provisioner already exists as a configure node
+ continue
+ }
+
+ if _, ok := supported[p]; !ok {
+ // If we don't support the provisioner type, skip it.
+ // Validation later will catch this as an error.
+ continue
+ }
+
+ // Build the vertex
+ var newV dag.Vertex = &NodeProvisioner{
+ NameValue: p,
+ PathValue: path,
+ }
+
+ // Add the missing provisioner node to the graph
+ m[key] = g.Add(newV)
+ }
+ }
+
+ return nil
+}
+
+// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the
+// graph that will close open provisioner connections that aren't needed
+// anymore. A provisioner connection is no longer needed once every resource
+// depending on that provisioner has been evaluated.
+type CloseProvisionerTransformer struct{}
+
+func (t *CloseProvisionerTransformer) Transform(g *Graph) error {
+ m := closeProvisionerVertexMap(g)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProvisionerConsumer); ok {
+ for _, p := range pv.ProvisionedBy() {
+ source := m[p]
+
+ if source == nil {
+ // Create a new graphNodeCloseProvisioner and add it to the graph
+ source = &graphNodeCloseProvisioner{ProvisionerNameValue: p}
+ g.Add(source)
+
+ // Make sure we also add the new graphNodeCloseProvisioner to the map
+ // so we don't create and add any duplicate graphNodeCloseProvisioners.
+ m[p] = source
+ }
+
+ g.Connect(dag.BasicEdge(source, v))
+ }
+ }
+ }
+
+ return nil
+}
+
+// provisionerMapKey is a helper that gives us the key to use for the
+// maps returned by things such as provisionerVertexMap.
+func provisionerMapKey(k string, v dag.Vertex) string {
+ pathPrefix := ""
+ if sp, ok := v.(GraphNodeSubPath); ok {
+ raw := normalizeModulePath(sp.Path())
+ if len(raw) > len(rootModulePath) {
+ pathPrefix = modulePrefixStr(raw) + "."
+ }
+ }
+
+ return pathPrefix + k
+}
+
+func provisionerVertexMap(g *Graph) map[string]dag.Vertex {
+ m := make(map[string]dag.Vertex)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProvisioner); ok {
+ key := provisionerMapKey(pv.ProvisionerName(), v)
+ m[key] = v
+ }
+ }
+
+ return m
+}
+
+func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex {
+ m := make(map[string]dag.Vertex)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeCloseProvisioner); ok {
+ m[pv.CloseProvisionerName()] = v
+ }
+ }
+
+ return m
+}
+
+type graphNodeCloseProvisioner struct {
+ ProvisionerNameValue string
+}
+
+func (n *graphNodeCloseProvisioner) Name() string {
+ return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue)
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeCloseProvisioner) EvalTree() EvalNode {
+ return &EvalCloseProvisioner{Name: n.ProvisionerNameValue}
+}
+
+func (n *graphNodeCloseProvisioner) CloseProvisionerName() string {
+ return n.ProvisionerNameValue
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
new file mode 100644
index 00000000..c5452354
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
@@ -0,0 +1,321 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeReferenceable must be implemented by any node that represents
+// a Terraform thing that can be referenced (resource, module, etc.).
+//
+// Even if the thing has no name, this should return an empty (but non-nil)
+// list. By implementing this and returning a non-nil result, you say that
+// this CAN be referenced and other methods of referencing may still be
+// possible (such as by path!)
+type GraphNodeReferenceable interface {
+ // ReferenceableName is the name by which this can be referenced.
+ // This can be either just the type, or include the field. Example:
+ // "aws_instance.bar" or "aws_instance.bar.id".
+ ReferenceableName() []string
+}
+
+// GraphNodeReferencer must be implemented by nodes that reference other
+// Terraform items and therefore depend on them.
+type GraphNodeReferencer interface {
+ // References are the list of things that this node references. This
+ // can include fields or just the type, just like GraphNodeReferenceable
+ // above.
+ References() []string
+}
+
+// GraphNodeReferenceGlobal is an interface that can optionally be
+// implemented. If ReferenceGlobal returns true, then the References()
+// and ReferenceableName() must be _fully qualified_ with "module.foo.bar"
+// etc.
+//
+// This allows a node to reference and be referenced by a specific name
+// that may cross module boundaries. This can be very dangerous so use
+// this wisely.
+//
+// The primary use case for this is module boundaries (variables coming in).
+type GraphNodeReferenceGlobal interface {
+ // Set to true to signal that references and name are fully
+ // qualified. See the above docs for more information.
+ ReferenceGlobal() bool
+}
+
+// ReferenceTransformer is a GraphTransformer that connects all the
+// nodes that reference each other in order to form the proper ordering.
+type ReferenceTransformer struct{}
+
+func (t *ReferenceTransformer) Transform(g *Graph) error {
+ // Build a reference map so we can efficiently look up the references
+ vs := g.Vertices()
+ m := NewReferenceMap(vs)
+
+ // Find the things that reference things and connect them
+ for _, v := range vs {
+ parents, _ := m.References(v)
+ parentsDbg := make([]string, len(parents))
+ for i, v := range parents {
+ parentsDbg[i] = dag.VertexName(v)
+ }
+ log.Printf(
+ "[DEBUG] ReferenceTransformer: %q references: %v",
+ dag.VertexName(v), parentsDbg)
+
+ for _, parent := range parents {
+ g.Connect(dag.BasicEdge(v, parent))
+ }
+ }
+
+ return nil
+}
+
+// ReferenceMap is a structure that can be used to efficiently check
+// for references on a graph.
+type ReferenceMap struct {
+	// m is the mapping of referenceable name to list of vertices that
+ // implement that name. This is built on initialization.
+ references map[string][]dag.Vertex
+ referencedBy map[string][]dag.Vertex
+}
+
+// References returns the list of vertices that this vertex
+// references along with any missing references.
+func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) {
+ rn, ok := v.(GraphNodeReferencer)
+ if !ok {
+ return nil, nil
+ }
+
+ var matches []dag.Vertex
+ var missing []string
+ prefix := m.prefix(v)
+ for _, ns := range rn.References() {
+ found := false
+ for _, n := range strings.Split(ns, "/") {
+ n = prefix + n
+ parents, ok := m.references[n]
+ if !ok {
+ continue
+ }
+
+ // Mark that we found a match
+ found = true
+
+ // Make sure this isn't a self reference, which isn't included
+ selfRef := false
+ for _, p := range parents {
+ if p == v {
+ selfRef = true
+ break
+ }
+ }
+ if selfRef {
+ continue
+ }
+
+ matches = append(matches, parents...)
+ break
+ }
+
+ if !found {
+ missing = append(missing, ns)
+ }
+ }
+
+ return matches, missing
+}
+
+// ReferencedBy returns the list of vertices that reference the
+// vertex passed in.
+func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex {
+ rn, ok := v.(GraphNodeReferenceable)
+ if !ok {
+ return nil
+ }
+
+ var matches []dag.Vertex
+ prefix := m.prefix(v)
+ for _, n := range rn.ReferenceableName() {
+ n = prefix + n
+ children, ok := m.referencedBy[n]
+ if !ok {
+ continue
+ }
+
+ // Make sure this isn't a self reference, which isn't included
+ selfRef := false
+ for _, p := range children {
+ if p == v {
+ selfRef = true
+ break
+ }
+ }
+ if selfRef {
+ continue
+ }
+
+ matches = append(matches, children...)
+ }
+
+ return matches
+}
+
+func (m *ReferenceMap) prefix(v dag.Vertex) string {
+ // If the node is stating it is already fully qualified then
+ // we don't have to create the prefix!
+ if gn, ok := v.(GraphNodeReferenceGlobal); ok && gn.ReferenceGlobal() {
+ return ""
+ }
+
+ // Create the prefix based on the path
+ var prefix string
+ if pn, ok := v.(GraphNodeSubPath); ok {
+ if path := normalizeModulePath(pn.Path()); len(path) > 1 {
+ prefix = modulePrefixStr(path) + "."
+ }
+ }
+
+ return prefix
+}
+
+// NewReferenceMap is used to create a new reference map for the
+// given set of vertices.
+func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
+ var m ReferenceMap
+
+ // Build the lookup table
+ refMap := make(map[string][]dag.Vertex)
+ for _, v := range vs {
+ // We're only looking for referenceable nodes
+ rn, ok := v.(GraphNodeReferenceable)
+ if !ok {
+ continue
+ }
+
+ // Go through and cache them
+ prefix := m.prefix(v)
+ for _, n := range rn.ReferenceableName() {
+ n = prefix + n
+ refMap[n] = append(refMap[n], v)
+ }
+
+ // If there is a path, it is always referenceable by that. For
+ // example, if this is a referenceable thing at path []string{"foo"},
+ // then it can be referenced at "module.foo"
+ if pn, ok := v.(GraphNodeSubPath); ok {
+ for _, p := range ReferenceModulePath(pn.Path()) {
+ refMap[p] = append(refMap[p], v)
+ }
+ }
+ }
+
+ // Build the lookup table for referenced by
+ refByMap := make(map[string][]dag.Vertex)
+ for _, v := range vs {
+ // We're only looking for referenceable nodes
+ rn, ok := v.(GraphNodeReferencer)
+ if !ok {
+ continue
+ }
+
+ // Go through and cache them
+ prefix := m.prefix(v)
+ for _, n := range rn.References() {
+ n = prefix + n
+ refByMap[n] = append(refByMap[n], v)
+ }
+ }
+
+ m.references = refMap
+ m.referencedBy = refByMap
+ return &m
+}
+
+// ReferenceModulePath returns the reference names for a module path. The
+// path "foo" would return "module.foo". If this is a deeply nested module,
+// every parent is included as well. For example: ["foo", "bar"] would
+// return both "module.foo" and "module.foo.module.bar".
+func ReferenceModulePath(p []string) []string {
+ p = normalizeModulePath(p)
+ if len(p) == 1 {
+ // Root, no name
+ return nil
+ }
+
+ result := make([]string, 0, len(p)-1)
+ for i := len(p); i > 1; i-- {
+ result = append(result, modulePrefixStr(p[:i]))
+ }
+
+ return result
+}
+
+// ReferencesFromConfig returns the references that a configuration has
+// based on the interpolated variables in a configuration.
+func ReferencesFromConfig(c *config.RawConfig) []string {
+ var result []string
+ for _, v := range c.Variables {
+ if r := ReferenceFromInterpolatedVar(v); len(r) > 0 {
+ result = append(result, r...)
+ }
+ }
+
+ return result
+}
+
+// ReferenceFromInterpolatedVar returns the references from this variable,
+// or nil if there is no reference.
+func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string {
+ switch v := v.(type) {
+ case *config.ModuleVariable:
+ return []string{fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)}
+ case *config.ResourceVariable:
+ id := v.ResourceId()
+
+ // If we have a multi-reference (splat), then we depend on ALL
+ // resources with this type/name.
+ if v.Multi && v.Index == -1 {
+ return []string{fmt.Sprintf("%s.*", id)}
+ }
+
+ // Otherwise, we depend on a specific index.
+ idx := v.Index
+ if !v.Multi || v.Index == -1 {
+ idx = 0
+ }
+
+ // Depend on the index, as well as "N" which represents the
+ // un-expanded set of resources.
+ return []string{fmt.Sprintf("%s.%d/%s.N", id, idx, id)}
+ case *config.UserVariable:
+ return []string{fmt.Sprintf("var.%s", v.Name)}
+ default:
+ return nil
+ }
+}
+
+func modulePrefixStr(p []string) string {
+ parts := make([]string, 0, len(p)*2)
+ for _, p := range p[1:] {
+ parts = append(parts, "module", p)
+ }
+
+ return strings.Join(parts, ".")
+}
+
+func modulePrefixList(result []string, prefix string) []string {
+ if prefix != "" {
+ for i, v := range result {
+ result[i] = fmt.Sprintf("%s.%s", prefix, v)
+ }
+ }
+
+ return result
+}
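
ReferenceModulePath's expansion is worth seeing on concrete input. The following self-contained sketch mirrors its loop over path prefixes; the path values are hypothetical.

package main

import (
	"fmt"
	"strings"
)

// referenceModulePath assumes p already starts with the implicit
// "root" element, as after normalizeModulePath above.
func referenceModulePath(p []string) []string {
	if len(p) == 1 {
		return nil // root has no module name
	}
	result := make([]string, 0, len(p)-1)
	// Longest prefix first, then each shorter ancestor.
	for i := len(p); i > 1; i-- {
		parts := make([]string, 0, (i-1)*2)
		for _, name := range p[1:i] {
			parts = append(parts, "module", name)
		}
		result = append(result, strings.Join(parts, "."))
	}
	return result
}

func main() {
	fmt.Println(referenceModulePath([]string{"root", "foo", "bar"}))
	// [module.foo.module.bar module.foo]
}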
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
new file mode 100644
index 00000000..cda35cb7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
@@ -0,0 +1,51 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ResourceCountTransformer is a GraphTransformer that expands the count
+// out for a specific resource.
+//
+// This assumes that the count is already interpolated.
+type ResourceCountTransformer struct {
+ Concrete ConcreteResourceNodeFunc
+
+ Count int
+ Addr *ResourceAddress
+}
+
+func (t *ResourceCountTransformer) Transform(g *Graph) error {
+ // Don't allow the count to be negative
+ if t.Count < 0 {
+ return fmt.Errorf("negative count: %d", t.Count)
+ }
+
+ // For each count, build and add the node
+ for i := 0; i < t.Count; i++ {
+ // Set the index. If our count is 1 we special case it so that
+ // we handle the "resource.0" and "resource" boundary properly.
+ index := i
+ if t.Count == 1 {
+ index = -1
+ }
+
+ // Build the resource address
+ addr := t.Addr.Copy()
+ addr.Index = index
+
+ // Build the abstract node and the concrete one
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ // Add it to the graph
+ g.Add(node)
+ }
+
+ return nil
+}
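
The count expansion's one subtlety is the count == 1 special case, where the single instance gets index -1 so "resource" and "resource.0" name the same instance. A runnable sketch with a hypothetical string address:

package main

import "fmt"

// expand mirrors the loop above: each index yields one node, and a
// count of 1 collapses to the un-indexed form.
func expand(name string, count int) []string {
	addrs := make([]string, 0, count)
	for i := 0; i < count; i++ {
		index := i
		if count == 1 {
			index = -1 // single instance is un-indexed
		}
		if index == -1 {
			addrs = append(addrs, name)
		} else {
			addrs = append(addrs, fmt.Sprintf("%s[%d]", name, index))
		}
	}
	return addrs
}

func main() {
	fmt.Println(expand("aws_instance.web", 1)) // [aws_instance.web]
	fmt.Println(expand("aws_instance.web", 3)) // [aws_instance.web[0] aws_instance.web[1] aws_instance.web[2]]
}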
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
new file mode 100644
index 00000000..aee053d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
@@ -0,0 +1,38 @@
+package terraform
+
+import "github.com/hashicorp/terraform/dag"
+
+const rootNodeName = "root"
+
+// RootTransformer is a GraphTransformer that adds a root to the graph.
+type RootTransformer struct{}
+
+func (t *RootTransformer) Transform(g *Graph) error {
+ // If we already have a good root, we're done
+ if _, err := g.Root(); err == nil {
+ return nil
+ }
+
+ // Add a root
+ var root graphNodeRoot
+ g.Add(root)
+
+ // Connect the root to all the edges that need it
+ for _, v := range g.Vertices() {
+ if v == root {
+ continue
+ }
+
+ if g.UpEdges(v).Len() == 0 {
+ g.Connect(dag.BasicEdge(root, v))
+ }
+ }
+
+ return nil
+}
+
+type graphNodeRoot struct{}
+
+func (n graphNodeRoot) Name() string {
+ return rootNodeName
+}
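
RootTransformer's rule is simply: connect a synthetic root to every vertex that nothing else depends on, so the graph has a single entry point. A toy edge-list version of the same idea, with hypothetical vertex names:

package main

import "fmt"

func main() {
	vertices := []string{"a", "b", "c", "d"}
	// edges[x] lists the vertices x depends on.
	edges := map[string][]string{"a": {"b"}, "c": {"b", "d"}}

	// Find vertices that already have something depending on them.
	hasDependent := map[string]bool{}
	for _, deps := range edges {
		for _, d := range deps {
			hasDependent[d] = true
		}
	}

	// The new root depends on every vertex with no dependents.
	for _, v := range vertices {
		if !hasDependent[v] {
			edges["root"] = append(edges["root"], v)
		}
	}
	fmt.Println(edges["root"]) // [a c]
}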
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
new file mode 100644
index 00000000..471cd746
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
@@ -0,0 +1,65 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// StateTransformer is a GraphTransformer that adds the elements of
+// the state to the graph.
+//
+// This transform is used for example by the DestroyPlanGraphBuilder to ensure
+// that only resources that are in the state are represented in the graph.
+type StateTransformer struct {
+ Concrete ConcreteResourceNodeFunc
+
+ State *State
+}
+
+func (t *StateTransformer) Transform(g *Graph) error {
+	// If the state is nil or empty (a nil state counts as empty), do nothing
+ if t.State.Empty() {
+ return nil
+ }
+
+	// Go through all the modules in the state.
+ log.Printf("[TRACE] StateTransformer: starting")
+ var nodes []dag.Vertex
+ for _, ms := range t.State.Modules {
+ log.Printf("[TRACE] StateTransformer: Module: %v", ms.Path)
+
+ // Go through all the resources in this module.
+ for name, rs := range ms.Resources {
+ log.Printf("[TRACE] StateTransformer: Resource %q: %#v", name, rs)
+
+ // Add the resource to the graph
+ addr, err := parseResourceAddressInternal(name)
+ if err != nil {
+ panic(fmt.Sprintf(
+ "Error parsing internal name, this is a bug: %q", name))
+ }
+
+ // Very important: add the module path for this resource to
+ // the address. Remove "root" from it.
+ addr.Path = ms.Path[1:]
+
+ // Add the resource to the graph
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ nodes = append(nodes, node)
+ }
+ }
+
+ // Add all the nodes to the graph
+ for _, n := range nodes {
+ g.Add(n)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
new file mode 100644
index 00000000..225ac4b4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
@@ -0,0 +1,144 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeTargetable is an interface for graph nodes to implement when they
+// need to be told about incoming targets. This is useful for nodes that need
+// to respect targets as they dynamically expand. Note that the list passed
+// in contains every target given, and each implementing graph node must
+// filter this list to the targets it considers relevant.
+type GraphNodeTargetable interface {
+ SetTargets([]ResourceAddress)
+}
+
+// TargetsTransformer is a GraphTransformer that, when the user specifies a
+// list of resources to target, limits the graph to only those resources and
+// their dependencies.
+type TargetsTransformer struct {
+ // List of targeted resource names specified by the user
+ Targets []string
+
+ // List of parsed targets, provided by callers like ResourceCountTransform
+ // that already have the targets parsed
+ ParsedTargets []ResourceAddress
+
+ // Set to true when we're in a `terraform destroy` or a
+ // `terraform plan -destroy`
+ Destroy bool
+}
+
+func (t *TargetsTransformer) Transform(g *Graph) error {
+ if len(t.Targets) > 0 && len(t.ParsedTargets) == 0 {
+ addrs, err := t.parseTargetAddresses()
+ if err != nil {
+ return err
+ }
+
+ t.ParsedTargets = addrs
+ }
+
+ if len(t.ParsedTargets) > 0 {
+ targetedNodes, err := t.selectTargetedNodes(g, t.ParsedTargets)
+ if err != nil {
+ return err
+ }
+
+ for _, v := range g.Vertices() {
+ removable := false
+ if _, ok := v.(GraphNodeResource); ok {
+ removable = true
+ }
+ if vr, ok := v.(RemovableIfNotTargeted); ok {
+ removable = vr.RemoveIfNotTargeted()
+ }
+ if removable && !targetedNodes.Include(v) {
+ log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v))
+ g.Remove(v)
+ }
+ }
+ }
+
+ return nil
+}
+
+func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) {
+ addrs := make([]ResourceAddress, len(t.Targets))
+ for i, target := range t.Targets {
+ ta, err := ParseResourceAddress(target)
+ if err != nil {
+ return nil, err
+ }
+ addrs[i] = *ta
+ }
+
+ return addrs, nil
+}
+
+// selectTargetedNodes returns the list of targeted nodes. A targeted node
+// is either addressed directly, or is an ancestor of a targeted node.
+// Destroy mode keeps descendents instead of ancestors.
+func (t *TargetsTransformer) selectTargetedNodes(
+ g *Graph, addrs []ResourceAddress) (*dag.Set, error) {
+ targetedNodes := new(dag.Set)
+ for _, v := range g.Vertices() {
+ if t.nodeIsTarget(v, addrs) {
+ targetedNodes.Add(v)
+
+ // We inform nodes that ask about the list of targets - helps for nodes
+ // that need to dynamically expand. Note that this only occurs for nodes
+ // that are already directly targeted.
+ if tn, ok := v.(GraphNodeTargetable); ok {
+ tn.SetTargets(addrs)
+ }
+
+ var deps *dag.Set
+ var err error
+ if t.Destroy {
+ deps, err = g.Descendents(v)
+ } else {
+ deps, err = g.Ancestors(v)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ for _, d := range deps.List() {
+ targetedNodes.Add(d)
+ }
+ }
+ }
+
+ return targetedNodes, nil
+}
+
+func (t *TargetsTransformer) nodeIsTarget(
+ v dag.Vertex, addrs []ResourceAddress) bool {
+ r, ok := v.(GraphNodeResource)
+ if !ok {
+ return false
+ }
+
+ addr := r.ResourceAddr()
+ for _, targetAddr := range addrs {
+ if targetAddr.Equals(addr) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// RemovableIfNotTargeted is a special interface for graph nodes that
+// aren't directly addressable, but need to be removed from the graph when they
+// are not targeted. (Nodes that are not directly targeted end up in the set of
+// targeted nodes because something that _is_ targeted depends on them.) The
+// initial use case for this interface is GraphNodeConfigVariable, which was
+// having trouble interpolating for module variables in targeted scenarios that
+// filtered out the resource node being referenced.
+type RemovableIfNotTargeted interface {
+ RemoveIfNotTargeted() bool
+}
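
Target selection reduces to a graph walk from each targeted vertex: collect its ancestors (everything the target depends on), or its descendents in destroy mode. A minimal sketch of the non-destroy direction with hypothetical resource names:

package main

import "fmt"

func main() {
	// deps[x] lists what x depends on.
	deps := map[string][]string{
		"app":   {"db", "net"},
		"db":    {"net"},
		"extra": {"net"},
	}

	// Walk from the target, collecting everything it needs.
	keep := map[string]bool{}
	queue := []string{"app"} // the targeted resource
	for len(queue) > 0 {
		v := queue[0]
		queue = queue[1:]
		if keep[v] {
			continue
		}
		keep[v] = true
		queue = append(queue, deps[v]...)
	}

	// Everything not in keep would be removed from the graph.
	fmt.Println(keep["db"], keep["net"], keep["extra"]) // true true false
}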
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
new file mode 100644
index 00000000..21842789
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
@@ -0,0 +1,20 @@
+package terraform
+
+// TransitiveReductionTransformer is a GraphTransformer that finds the
+// transitive reduction of the graph. For a definition of transitive
+// reduction, see Wikipedia.
+type TransitiveReductionTransformer struct{}
+
+func (t *TransitiveReductionTransformer) Transform(g *Graph) error {
+	// If the graph isn't valid, skip the transitive reduction.
+	// We don't error here because graph validation (with clearer
+	// error messages) is handled elsewhere in Terraform.
+ if err := g.Validate(); err != nil {
+ return nil
+ }
+
+ // Do it
+ g.TransitiveReduction()
+
+ return nil
+}
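
For reference, transitive reduction on a DAG removes every edge u->v where v stays reachable from u through some longer path. A compact standalone sketch (not the dag package's actual implementation):

package main

import "fmt"

type edge struct{ from, to string }

// reachable reports whether to can be reached from from in adj.
func reachable(adj map[string][]string, from, to string) bool {
	seen := map[string]bool{}
	stack := []string{from}
	for len(stack) > 0 {
		u := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if u == to {
			return true
		}
		if seen[u] {
			continue
		}
		seen[u] = true
		stack = append(stack, adj[u]...)
	}
	return false
}

func main() {
	adj := map[string][]string{"a": {"b", "c"}, "b": {"c"}}
	var kept []edge
	for u, vs := range adj {
		for _, v := range vs {
			// Drop u->v, then check whether v is still reachable.
			trimmed := map[string][]string{}
			for x, ys := range adj {
				for _, y := range ys {
					if x == u && y == v {
						continue
					}
					trimmed[x] = append(trimmed[x], y)
				}
			}
			if !reachable(trimmed, u, v) {
				kept = append(kept, edge{u, v})
			}
		}
	}
	// a->c is redundant (a->b->c), so only {a b} and {b c} remain;
	// map iteration makes the print order vary.
	fmt.Println(kept)
}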
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
new file mode 100644
index 00000000..b31e2c76
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
@@ -0,0 +1,40 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// RootVariableTransformer is a GraphTransformer that adds all the root
+// variables to the graph.
+//
+// Root variables are currently no-ops but they must be added to the
+// graph since downstream things that depend on them must be able to
+// reach them.
+type RootVariableTransformer struct {
+ Module *module.Tree
+}
+
+func (t *RootVariableTransformer) Transform(g *Graph) error {
+ // If no config, no variables
+ if t.Module == nil {
+ return nil
+ }
+
+ // If we have no vars, we're done!
+ vars := t.Module.Config().Variables
+ if len(vars) == 0 {
+ return nil
+ }
+
+ // Add all variables here
+ for _, v := range vars {
+ node := &NodeRootVariable{
+ Config: v,
+ }
+
+ // Add it!
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
new file mode 100644
index 00000000..6b1293fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
@@ -0,0 +1,44 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// VertexTransformer is a GraphTransformer that transforms vertices
+// using the GraphVertexTransformers. The Transforms are run in sequential
+// order. If a transform replaces a vertex then the next transform will see
+// the new vertex.
+type VertexTransformer struct {
+ Transforms []GraphVertexTransformer
+}
+
+func (t *VertexTransformer) Transform(g *Graph) error {
+ for _, v := range g.Vertices() {
+ for _, vt := range t.Transforms {
+ newV, err := vt.Transform(v)
+ if err != nil {
+ return err
+ }
+
+ // If the vertex didn't change, then don't do anything more
+ if newV == v {
+ continue
+ }
+
+ // Vertex changed, replace it within the graph
+ if ok := g.Replace(v, newV); !ok {
+ // This should never happen, big problem
+ return fmt.Errorf(
+ "Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v",
+ dag.VertexName(v), dag.VertexName(newV), v, newV)
+ }
+
+ // Replace v so that future transforms use the proper vertex
+ v = newV
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
new file mode 100644
index 00000000..7c874592
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
@@ -0,0 +1,26 @@
+package terraform
+
+// UIInput is the interface that must be implemented to ask for input
+// from the user. Implementations should forward the request to wherever
+// the user actually provides values.
+type UIInput interface {
+ Input(*InputOpts) (string, error)
+}
+
+// InputOpts are options for asking for input.
+type InputOpts struct {
+ // Id is a unique ID for the question being asked that might be
+ // used for logging or to look up a prior answered question.
+ Id string
+
+ // Query is a human-friendly question for inputting this value.
+ Query string
+
+	// Description is a description of what this option is. Be aware
+	// that this will probably be rendered in a terminal, so split lines
+	// as you see necessary.
+ Description string
+
+ // Default will be the value returned if no data is entered.
+ Default string
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
new file mode 100644
index 00000000..e3a07efa
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
@@ -0,0 +1,23 @@
+package terraform
+
+// MockUIInput is an implementation of UIInput that can be used for tests.
+type MockUIInput struct {
+ InputCalled bool
+ InputOpts *InputOpts
+ InputReturnMap map[string]string
+ InputReturnString string
+ InputReturnError error
+ InputFn func(*InputOpts) (string, error)
+}
+
+func (i *MockUIInput) Input(opts *InputOpts) (string, error) {
+ i.InputCalled = true
+ i.InputOpts = opts
+ if i.InputFn != nil {
+ return i.InputFn(opts)
+ }
+ if i.InputReturnMap != nil {
+ return i.InputReturnMap[opts.Id], i.InputReturnError
+ }
+ return i.InputReturnString, i.InputReturnError
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
new file mode 100644
index 00000000..2207d1d0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
@@ -0,0 +1,19 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// PrefixUIInput is an implementation of UIInput that prefixes the ID
+// with a string, allowing queries to be namespaced.
+type PrefixUIInput struct {
+ IdPrefix string
+ QueryPrefix string
+ UIInput UIInput
+}
+
+func (i *PrefixUIInput) Input(opts *InputOpts) (string, error) {
+ opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id)
+ opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query)
+ return i.UIInput.Input(opts)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go
new file mode 100644
index 00000000..84427c63
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go
@@ -0,0 +1,7 @@
+package terraform
+
+// UIOutput is the interface that must be implemented to output
+// data to the end user.
+type UIOutput interface {
+ Output(string)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go
new file mode 100644
index 00000000..135a91c5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go
@@ -0,0 +1,9 @@
+package terraform
+
+type CallbackUIOutput struct {
+ OutputFn func(string)
+}
+
+func (o *CallbackUIOutput) Output(v string) {
+ o.OutputFn(v)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
new file mode 100644
index 00000000..7852bc42
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
@@ -0,0 +1,16 @@
+package terraform
+
+// MockUIOutput is an implementation of UIOutput that can be used for tests.
+type MockUIOutput struct {
+ OutputCalled bool
+ OutputMessage string
+ OutputFn func(string)
+}
+
+func (o *MockUIOutput) Output(v string) {
+ o.OutputCalled = true
+ o.OutputMessage = v
+ if o.OutputFn != nil {
+ o.OutputFn(v)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
new file mode 100644
index 00000000..878a0312
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
@@ -0,0 +1,15 @@
+package terraform
+
+// ProvisionerUIOutput is an implementation of UIOutput that calls a hook
+// for the output so that the hooks can handle it.
+type ProvisionerUIOutput struct {
+ Info *InstanceInfo
+ Type string
+ Hooks []Hook
+}
+
+func (o *ProvisionerUIOutput) Output(msg string) {
+ for _, h := range o.Hooks {
+ h.ProvisionOutput(o.Info, o.Type, msg)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/util.go b/vendor/github.com/hashicorp/terraform/terraform/util.go
new file mode 100644
index 00000000..f41f0d7d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/util.go
@@ -0,0 +1,93 @@
+package terraform
+
+import (
+ "sort"
+ "strings"
+)
+
+// Semaphore is a wrapper around a channel to provide
+// utility methods to clarify that we are treating the
+// channel as a semaphore
+type Semaphore chan struct{}
+
+// NewSemaphore creates a semaphore that allows up
+// to a given limit of simultaneous acquisitions
+func NewSemaphore(n int) Semaphore {
+ if n <= 0 {
+ panic("semaphore with limit <= 0")
+ }
+ ch := make(chan struct{}, n)
+ return Semaphore(ch)
+}
+
+// Acquire is used to acquire an available slot.
+// Blocks until available.
+func (s Semaphore) Acquire() {
+ s <- struct{}{}
+}
+
+// TryAcquire is used to do a non-blocking acquire.
+// Returns a bool indicating success
+func (s Semaphore) TryAcquire() bool {
+ select {
+ case s <- struct{}{}:
+ return true
+ default:
+ return false
+ }
+}
+
+// Release is used to return a slot. Acquire must
+// be called as a pre-condition.
+func (s Semaphore) Release() {
+ select {
+ case <-s:
+ default:
+ panic("release without an acquire")
+ }
+}
+
+// resourceProvider returns the provider name for the given type.
+func resourceProvider(t, alias string) string {
+ if alias != "" {
+ return alias
+ }
+
+ idx := strings.IndexRune(t, '_')
+ if idx == -1 {
+ // If there are no underscores, the resource name is assumed to
+ // also be the provider name, e.g. when the provider exposes
+ // only a single resource of each type.
+ return t
+ }
+
+ return t[:idx]
+}
+
+ // strSliceContains checks if a given string is contained in a slice.
+// When anybody asks why Go needs generics, here you go.
+func strSliceContains(haystack []string, needle string) bool {
+ for _, s := range haystack {
+ if s == needle {
+ return true
+ }
+ }
+ return false
+}
+
+ // uniqueStrings deduplicates a slice of strings.
+func uniqueStrings(s []string) []string {
+ if len(s) < 2 {
+ return s
+ }
+
+ sort.Strings(s)
+ result := make([]string, 1, len(s))
+ result[0] = s[0]
+ for i := 1; i < len(s); i++ {
+ if s[i] != result[len(result)-1] {
+ result = append(result, s[i])
+ }
+ }
+ return result
+}
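
A short in-package sketch of the helpers above; the wrapper function name is hypothetical:

```go
func exampleUtil() {
	// A semaphore permitting two concurrent holders.
	sem := NewSemaphore(2)
	sem.Acquire()
	sem.Acquire()
	if !sem.TryAcquire() {
		// Both slots are held, so the non-blocking attempt fails.
	}
	sem.Release()
	sem.Release()

	// resourceProvider derives the provider from the type prefix
	// when no alias is set.
	_ = resourceProvider("aws_instance", "") // "aws"

	// uniqueStrings sorts its input as a side effect and returns
	// the deduplicated slice.
	_ = uniqueStrings([]string{"b", "a", "b"}) // ["a", "b"]
}
```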
diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go
new file mode 100644
index 00000000..300f2adb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/variables.go
@@ -0,0 +1,166 @@
+package terraform
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/helper/hilmapstructure"
+)
+
+// Variables returns the fully loaded set of variables to use with
+// ContextOpts and NewContext, loading any additional variables from
+// the environment or any other sources.
+//
+// The given module tree doesn't need to be loaded.
+func Variables(
+ m *module.Tree,
+ override map[string]interface{}) (map[string]interface{}, error) {
+ result := make(map[string]interface{})
+
+ // Variables are loaded in the following sequence. Each additional step
+ // will override conflicting variable keys from prior steps:
+ //
+ // * Take default values from config
+ // * Take values from TF_VAR_x env vars
+ // * Take values specified in the "override" param which is usually
+ // from -var, -var-file, etc.
+ //
+
+ // First load from the config
+ for _, v := range m.Config().Variables {
+ // If the var has no default, ignore
+ if v.Default == nil {
+ continue
+ }
+
+ // If the type isn't a string, we use it as-is since it is a rich type
+ if v.Type() != config.VariableTypeString {
+ result[v.Name] = v.Default
+ continue
+ }
+
+ // v.Default has already been parsed as HCL but it may be an int type
+ switch typedDefault := v.Default.(type) {
+ case string:
+ if typedDefault == "" {
+ continue
+ }
+ result[v.Name] = typedDefault
+ case int, int64:
+ result[v.Name] = fmt.Sprintf("%d", typedDefault)
+ case float32, float64:
+ result[v.Name] = fmt.Sprintf("%f", typedDefault)
+ case bool:
+ result[v.Name] = fmt.Sprintf("%t", typedDefault)
+ default:
+ panic(fmt.Sprintf(
+ "Unknown default var type: %T\n\n"+
+ "THIS IS A BUG. Please report it.",
+ v.Default))
+ }
+ }
+
+ // Load from env vars
+ for _, v := range os.Environ() {
+ if !strings.HasPrefix(v, VarEnvPrefix) {
+ continue
+ }
+
+ // Strip off the prefix and get the value after the first "="
+ idx := strings.Index(v, "=")
+ k := v[len(VarEnvPrefix):idx]
+ v = v[idx+1:]
+
+ // Override the configuration-default values. Note that *not* finding the variable
+ // in configuration is OK, as we don't want to preclude people from having multiple
+ // sets of TF_VAR_whatever in their environment even if it is a little weird.
+ for _, schema := range m.Config().Variables {
+ if schema.Name != k {
+ continue
+ }
+
+ varType := schema.Type()
+ varVal, err := parseVariableAsHCL(k, v, varType)
+ if err != nil {
+ return nil, err
+ }
+
+ switch varType {
+ case config.VariableTypeMap:
+ if err := varSetMap(result, k, varVal); err != nil {
+ return nil, err
+ }
+ default:
+ result[k] = varVal
+ }
+ }
+ }
+
+ // Load from overrides
+ for k, v := range override {
+ for _, schema := range m.Config().Variables {
+ if schema.Name != k {
+ continue
+ }
+
+ switch schema.Type() {
+ case config.VariableTypeList:
+ result[k] = v
+ case config.VariableTypeMap:
+ if err := varSetMap(result, k, v); err != nil {
+ return nil, err
+ }
+ case config.VariableTypeString:
+ // Convert to a string and set. We don't catch any errors
+ // here because the validation step later should catch
+ // any type errors.
+ var strVal string
+ if err := hilmapstructure.WeakDecode(v, &strVal); err == nil {
+ result[k] = strVal
+ } else {
+ result[k] = v
+ }
+ default:
+ panic(fmt.Sprintf(
+ "Unhandled var type: %T\n\n"+
+ "THIS IS A BUG. Please report it.",
+ schema.Type()))
+ }
+ }
+ }
+
+ return result, nil
+}
+
+// varSetMap sets or merges the map in "v" with the key "k" in the
+// "current" set of variables. This is just a private function to remove
+// duplicate logic in Variables
+func varSetMap(current map[string]interface{}, k string, v interface{}) error {
+ existing, ok := current[k]
+ if !ok {
+ current[k] = v
+ return nil
+ }
+
+ existingMap, ok := existing.(map[string]interface{})
+ if !ok {
+ panic(fmt.Sprintf("%q is not a map, this is a bug in Terraform.", k))
+ }
+
+ switch typedV := v.(type) {
+ case []map[string]interface{}:
+ for newKey, newVal := range typedV[0] {
+ existingMap[newKey] = newVal
+ }
+ case map[string]interface{}:
+ for newKey, newVal := range typedV {
+ existingMap[newKey] = newVal
+ }
+ default:
+ return fmt.Errorf("variable %q should be type map, got %s", k, hclTypeName(v))
+ }
+ return nil
+}
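
To make the merge semantics of `varSetMap` concrete, a small in-package sketch (the error-message comment is an assumption based on the code above):

```go
func exampleVarSetMap() {
	current := map[string]interface{}{
		"tags": map[string]interface{}{"team": "infra"},
	}

	// Merging another map under the same key combines the entries:
	// current["tags"] becomes {"team": "infra", "env": "prod"}.
	if err := varSetMap(current, "tags", map[string]interface{}{"env": "prod"}); err != nil {
		panic(err)
	}

	// A non-map value for an existing map key is rejected, e.g.
	// `variable "tags" should be type map, got string`.
	if err := varSetMap(current, "tags", "oops"); err == nil {
		panic("expected a type error")
	}
}
```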
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go
new file mode 100644
index 00000000..e184dc5a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/version.go
@@ -0,0 +1,31 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/go-version"
+)
+
+// The main version number that is being run at the moment.
+const Version = "0.9.4"
+
+// A pre-release marker for the version. If this is "" (empty string)
+// then it means that it is a final release. Otherwise, this is a pre-release
+// such as "dev" (in development), "beta", "rc1", etc.
+const VersionPrerelease = ""
+
+// SemVersion is an instance of version.Version. This has the secondary
+// benefit of verifying during tests and init time that our version is a
+// proper semantic version, which should always be the case.
+var SemVersion = version.Must(version.NewVersion(Version))
+
+// VersionHeader is the header name used to send the current terraform version
+// in http requests.
+const VersionHeader = "Terraform-Version"
+
+func VersionString() string {
+ if VersionPrerelease != "" {
+ return fmt.Sprintf("%s-%s", Version, VersionPrerelease)
+ }
+ return Version
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
new file mode 100644
index 00000000..3cbbf560
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
@@ -0,0 +1,69 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/go-version"
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// checkRequiredVersion verifies that any version requirements specified by
+// the configuration are met.
+//
+// This checks the root module as well as any additional version requirements
+// from child modules.
+//
+// This is tested in context_test.go.
+func checkRequiredVersion(m *module.Tree) error {
+ // Check any children
+ for _, c := range m.Children() {
+ if err := checkRequiredVersion(c); err != nil {
+ return err
+ }
+ }
+
+ var tf *config.Terraform
+ if c := m.Config(); c != nil {
+ tf = c.Terraform
+ }
+
+ // If there is no Terraform config or the required version isn't set,
+ // we move on.
+ if tf == nil || tf.RequiredVersion == "" {
+ return nil
+ }
+
+ // Path for errors
+ module := "root"
+ if path := normalizeModulePath(m.Path()); len(path) > 1 {
+ module = modulePrefixStr(path)
+ }
+
+ // Check the version requirement of this module
+ cs, err := version.NewConstraint(tf.RequiredVersion)
+ if err != nil {
+ return fmt.Errorf(
+ "%s: terraform.required_version %q syntax error: %s",
+ module,
+ tf.RequiredVersion, err)
+ }
+
+ if !cs.Check(SemVersion) {
+ return fmt.Errorf(
+ "The currently running version of Terraform doesn't meet the\n"+
+ "version requirements explicitly specified by the configuration.\n"+
+ "Please use the required version or update the configuration.\n"+
+ "Note that version requirements are usually set for a reason, so\n"+
+ "we recommend verifying with whoever set the version requirements\n"+
+ "prior to making any manual changes.\n\n"+
+ " Module: %s\n"+
+ " Required version: %s\n"+
+ " Current version: %s",
+ module,
+ tf.RequiredVersion,
+ SemVersion)
+ }
+
+ return nil
+}
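
checkRequiredVersion builds on hashicorp/go-version; a standalone sketch of the same constraint check (the constraint string is illustrative):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-version"
)

func main() {
	// Parse a constraint the way terraform.required_version is parsed.
	cs, err := version.NewConstraint(">= 0.9.0, < 0.10.0")
	if err != nil {
		panic(err)
	}

	// Check the running version against it, as with SemVersion above.
	running := version.Must(version.NewVersion("0.9.4"))
	fmt.Println(cs.Check(running)) // true
}
```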
diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
new file mode 100644
index 00000000..cbd78dd9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT.
+
+package terraform
+
+import "fmt"
+
+const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport"
+
+var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96}
+
+func (i walkOperation) String() string {
+ if i >= walkOperation(len(_walkOperation_index)-1) {
+ return fmt.Sprintf("walkOperation(%d)", i)
+ }
+ return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]]
+}
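
The generated String method avoids a []string by slicing one concatenated name string with an index table; a hand-written sketch of the same technique for a two-value enum (fmt assumed imported):

```go
type color int

const (
	red color = iota
	green
)

// One concatenated string plus offsets, mirroring the stringer output.
const _color_name = "redgreen"

var _color_index = [...]uint8{0, 3, 8}

func (c color) String() string {
	if c < 0 || int(c) >= len(_color_index)-1 {
		return fmt.Sprintf("color(%d)", c)
	}
	return _color_name[_color_index[c]:_color_index[c+1]]
}
```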
diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE
new file mode 100644
index 00000000..f0e5c79e
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0. \ No newline at end of file
diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md
new file mode 100644
index 00000000..d4db7fc9
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/README.md
@@ -0,0 +1,86 @@
+# Yamux
+
+Yamux (Yet another Multiplexer) is a multiplexing library for Golang.
+It relies on an underlying connection, such as TCP or a Unix domain
+socket, to provide reliability and ordering, and layers stream-oriented
+multiplexing on top of it. It is inspired by SPDY but is not
+interoperable with it.
+
+Yamux features include:
+
+* Bi-directional streams
+ * Streams can be opened by either client or server
+ * Useful for NAT traversal
+ * Server-side push support
+* Flow control
+ * Avoid starvation
+ * Back-pressure to prevent overwhelming a receiver
+* Keep Alives
+ * Enables persistent connections over a load balancer
+* Efficient
+ * Enables thousands of logical streams with low overhead
+
+## Documentation
+
+For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux).
+
+## Specification
+
+The full specification for Yamux is provided in the `spec.md` file.
+It can be used as a guide for implementers of interoperable libraries.
+
+## Usage
+
+Using Yamux is remarkably simple:
+
+```go
+
+func client() {
+ // Get a TCP connection
+ conn, err := net.Dial(...)
+ if err != nil {
+ panic(err)
+ }
+
+ // Setup client side of yamux
+ session, err := yamux.Client(conn, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ // Open a new stream
+ stream, err := session.Open()
+ if err != nil {
+ panic(err)
+ }
+
+ // Stream implements net.Conn
+ stream.Write([]byte("ping"))
+}
+
+func server() {
+ // Accept a TCP connection
+ conn, err := listener.Accept()
+ if err != nil {
+ panic(err)
+ }
+
+ // Setup server side of yamux
+ session, err := yamux.Server(conn, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ // Accept a stream
+ stream, err := session.Accept()
+ if err != nil {
+ panic(err)
+ }
+
+ // Listen for a message
+ buf := make([]byte, 4)
+ stream.Read(buf)
+}
+
+```
+
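
The two halves above can be exercised in one process; a self-contained sketch that wires them together over net.Pipe (chosen here purely so the example runs without a real listener):

```go
package main

import (
	"fmt"
	"net"

	"github.com/hashicorp/yamux"
)

func main() {
	clientConn, serverConn := net.Pipe()

	// Server side: accept one stream and echo four bytes back.
	go func() {
		session, err := yamux.Server(serverConn, nil)
		if err != nil {
			panic(err)
		}
		stream, err := session.Accept()
		if err != nil {
			panic(err)
		}
		buf := make([]byte, 4)
		if _, err := stream.Read(buf); err != nil {
			panic(err)
		}
		stream.Write(buf)
	}()

	// Client side: open a stream, send a ping, read the echo.
	session, err := yamux.Client(clientConn, nil)
	if err != nil {
		panic(err)
	}
	stream, err := session.Open()
	if err != nil {
		panic(err)
	}
	stream.Write([]byte("ping"))
	buf := make([]byte, 4)
	if _, err := stream.Read(buf); err != nil {
		panic(err)
	}
	fmt.Printf("echoed: %s\n", buf)
}
```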
diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go
new file mode 100644
index 00000000..be6ebca9
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/addr.go
@@ -0,0 +1,60 @@
+package yamux
+
+import (
+ "fmt"
+ "net"
+)
+
+// hasAddr is used to get the address from the underlying connection
+type hasAddr interface {
+ LocalAddr() net.Addr
+ RemoteAddr() net.Addr
+}
+
+// yamuxAddr is used when we cannot get the underlying address
+type yamuxAddr struct {
+ Addr string
+}
+
+func (*yamuxAddr) Network() string {
+ return "yamux"
+}
+
+func (y *yamuxAddr) String() string {
+ return fmt.Sprintf("yamux:%s", y.Addr)
+}
+
+// Addr is used to get the address of the listener.
+func (s *Session) Addr() net.Addr {
+ return s.LocalAddr()
+}
+
+// LocalAddr is used to get the local address of the
+// underlying connection.
+func (s *Session) LocalAddr() net.Addr {
+ addr, ok := s.conn.(hasAddr)
+ if !ok {
+ return &yamuxAddr{"local"}
+ }
+ return addr.LocalAddr()
+}
+
+// RemoteAddr is used to get the address of the remote end
+// of the underlying connection
+func (s *Session) RemoteAddr() net.Addr {
+ addr, ok := s.conn.(hasAddr)
+ if !ok {
+ return &yamuxAddr{"remote"}
+ }
+ return addr.RemoteAddr()
+}
+
+// LocalAddr returns the local address
+func (s *Stream) LocalAddr() net.Addr {
+ return s.session.LocalAddr()
+}
+
+// RemoteAddr returns the remote address
+func (s *Stream) RemoteAddr() net.Addr {
+ return s.session.RemoteAddr()
+}
diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go
new file mode 100644
index 00000000..4f529382
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/const.go
@@ -0,0 +1,157 @@
+package yamux
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+var (
+ // ErrInvalidVersion means we received a frame with an
+ // invalid version
+ ErrInvalidVersion = fmt.Errorf("invalid protocol version")
+
+ // ErrInvalidMsgType means we received a frame with an
+ // invalid message type
+ ErrInvalidMsgType = fmt.Errorf("invalid msg type")
+
+ // ErrSessionShutdown is used if there is a shutdown during
+ // an operation
+ ErrSessionShutdown = fmt.Errorf("session shutdown")
+
+ // ErrStreamsExhausted is returned if we have no more
+ // stream ids to issue
+ ErrStreamsExhausted = fmt.Errorf("streams exhausted")
+
+ // ErrDuplicateStream is used if a duplicate stream is
+ // opened inbound
+ ErrDuplicateStream = fmt.Errorf("duplicate stream initiated")
+
+ // ErrRecvWindowExceeded indicates the receive window was exceeded
+ ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
+
+ // ErrTimeout is used when we reach an IO deadline
+ ErrTimeout = fmt.Errorf("i/o deadline reached")
+
+ // ErrStreamClosed is returned when using a closed stream
+ ErrStreamClosed = fmt.Errorf("stream closed")
+
+ // ErrUnexpectedFlag is set when we get an unexpected flag
+ ErrUnexpectedFlag = fmt.Errorf("unexpected flag")
+
+ // ErrRemoteGoAway is used when we get a go away from the other side
+ ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections")
+
+ // ErrConnectionReset is sent if a stream is reset. This can happen
+ // if the backlog is exceeded, or if there was a remote GoAway.
+ ErrConnectionReset = fmt.Errorf("connection reset")
+
+ // ErrConnectionWriteTimeout indicates that we hit the "safety valve"
+ // timeout writing to the underlying stream connection.
+ ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout")
+
+ // ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close
+ ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout")
+)
+
+const (
+ // protoVersion is the only version we support
+ protoVersion uint8 = 0
+)
+
+const (
+ // Data is used for data frames. They are followed
+ // by length bytes worth of payload.
+ typeData uint8 = iota
+
+ // WindowUpdate is used to change the window of
+ // a given stream. The length indicates the delta
+ // update to the window.
+ typeWindowUpdate
+
+ // Ping is sent as a keep-alive or to measure
+ // the RTT. The StreamID and Length value are echoed
+ // back in the response.
+ typePing
+
+ // GoAway is sent to terminate a session. The StreamID
+ // should be 0 and the length is an error code.
+ typeGoAway
+)
+
+const (
+ // SYN is sent to signal a new stream. May
+ // be sent with a data payload
+ flagSYN uint16 = 1 << iota
+
+ // ACK is sent to acknowledge a new stream. May
+ // be sent with a data payload
+ flagACK
+
+ // FIN is sent to half-close the given stream.
+ // May be sent with a data payload.
+ flagFIN
+
+ // RST is used to hard close a given stream.
+ flagRST
+)
+
+const (
+ // initialStreamWindow is the initial stream window size
+ initialStreamWindow uint32 = 256 * 1024
+)
+
+const (
+ // goAwayNormal is sent on a normal termination
+ goAwayNormal uint32 = iota
+
+ // goAwayProtoErr sent on a protocol error
+ goAwayProtoErr
+
+ // goAwayInternalErr sent on an internal error
+ goAwayInternalErr
+)
+
+const (
+ sizeOfVersion = 1
+ sizeOfType = 1
+ sizeOfFlags = 2
+ sizeOfStreamID = 4
+ sizeOfLength = 4
+ headerSize = sizeOfVersion + sizeOfType + sizeOfFlags +
+ sizeOfStreamID + sizeOfLength
+)
+
+type header []byte
+
+func (h header) Version() uint8 {
+ return h[0]
+}
+
+func (h header) MsgType() uint8 {
+ return h[1]
+}
+
+func (h header) Flags() uint16 {
+ return binary.BigEndian.Uint16(h[2:4])
+}
+
+func (h header) StreamID() uint32 {
+ return binary.BigEndian.Uint32(h[4:8])
+}
+
+func (h header) Length() uint32 {
+ return binary.BigEndian.Uint32(h[8:12])
+}
+
+func (h header) String() string {
+ return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d",
+ h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length())
+}
+
+func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) {
+ h[0] = protoVersion
+ h[1] = msgType
+ binary.BigEndian.PutUint16(h[2:4], flags)
+ binary.BigEndian.PutUint32(h[4:8], streamID)
+ binary.BigEndian.PutUint32(h[8:12], length)
+}
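
Within the yamux package, the header helpers round-trip like this (a minimal sketch):

```go
func exampleHeader() {
	hdr := header(make([]byte, headerSize))
	hdr.encode(typeData, flagSYN|flagACK, 7, 1024)

	_ = hdr.Version()  // 0 (protoVersion)
	_ = hdr.MsgType()  // typeData
	_ = hdr.Flags()    // flagSYN | flagACK
	_ = hdr.StreamID() // 7
	_ = hdr.Length()   // 1024
}
```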
diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go
new file mode 100644
index 00000000..7abc7c74
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/mux.go
@@ -0,0 +1,87 @@
+package yamux
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+// Config is used to tune the Yamux session
+type Config struct {
+ // AcceptBacklog is used to limit how many streams may be
+ // waiting for an accept.
+ AcceptBacklog int
+
+ // EnableKeepAlive is used to enable periodic keep-alive
+ // messages using pings.
+ EnableKeepAlive bool
+
+ // KeepAliveInterval is how often to perform the keep alive
+ KeepAliveInterval time.Duration
+
+ // ConnectionWriteTimeout is meant to be a "safety valve" timeout after
+ // which we will suspect a problem with the underlying connection and
+ // close it. This is only applied to writes, where there's generally
+ // an expectation that things will move along quickly.
+ ConnectionWriteTimeout time.Duration
+
+ // MaxStreamWindowSize is used to control the maximum
+ // window size that we allow for a stream.
+ MaxStreamWindowSize uint32
+
+ // LogOutput is used to control the log destination
+ LogOutput io.Writer
+}
+
+// DefaultConfig is used to return a default configuration
+func DefaultConfig() *Config {
+ return &Config{
+ AcceptBacklog: 256,
+ EnableKeepAlive: true,
+ KeepAliveInterval: 30 * time.Second,
+ ConnectionWriteTimeout: 10 * time.Second,
+ MaxStreamWindowSize: initialStreamWindow,
+ LogOutput: os.Stderr,
+ }
+}
+
+// VerifyConfig is used to verify the sanity of configuration
+func VerifyConfig(config *Config) error {
+ if config.AcceptBacklog <= 0 {
+ return fmt.Errorf("backlog must be positive")
+ }
+ if config.KeepAliveInterval == 0 {
+ return fmt.Errorf("keep-alive interval must be positive")
+ }
+ if config.MaxStreamWindowSize < initialStreamWindow {
+ return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
+ }
+ return nil
+}
+
+// Server is used to initialize a new server-side connection.
+// There must be at most one server-side connection. If a nil config is
+// provided, the DefaultConfig will be used.
+func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+ if config == nil {
+ config = DefaultConfig()
+ }
+ if err := VerifyConfig(config); err != nil {
+ return nil, err
+ }
+ return newSession(config, conn, false), nil
+}
+
+// Client is used to initialize a new client-side connection.
+// There must be at most one client-side connection. If a nil config is
+// provided, the DefaultConfig will be used.
+func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+ if config == nil {
+ config = DefaultConfig()
+ }
+
+ if err := VerifyConfig(config); err != nil {
+ return nil, err
+ }
+ return newSession(config, conn, true), nil
+}
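
A sketch of tuning the defaults before dialing, assuming it sits alongside mux.go in the yamux package (io and time as imported above); the helper name is hypothetical:

```go
// newTunedClient shows how callers typically adjust DefaultConfig
// rather than building a Config from scratch, so the fields checked
// by VerifyConfig keep sane values.
func newTunedClient(conn io.ReadWriteCloser) (*Session, error) {
	config := DefaultConfig()
	config.KeepAliveInterval = 10 * time.Second
	config.ConnectionWriteTimeout = 5 * time.Second
	return Client(conn, config)
}
```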
diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go
new file mode 100644
index 00000000..e1798183
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/session.go
@@ -0,0 +1,623 @@
+package yamux
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "math"
+ "net"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Session is used to wrap a reliable ordered connection and to
+// multiplex it into multiple streams.
+type Session struct {
+ // remoteGoAway indicates the remote side does
+ // not want further connections. Must be first for alignment.
+ remoteGoAway int32
+
+ // localGoAway indicates that we should stop
+ // accepting further connections. Must be first for alignment.
+ localGoAway int32
+
+ // nextStreamID is the next stream ID we should
+ // use. This depends on whether we are a client or a server.
+ nextStreamID uint32
+
+ // config holds our configuration
+ config *Config
+
+ // logger is used for our logs
+ logger *log.Logger
+
+ // conn is the underlying connection
+ conn io.ReadWriteCloser
+
+ // bufRead is a buffered reader
+ bufRead *bufio.Reader
+
+ // pings is used to track inflight pings
+ pings map[uint32]chan struct{}
+ pingID uint32
+ pingLock sync.Mutex
+
+ // streams maps a stream id to a stream, and inflight has an entry
+ // for any outgoing stream that has not yet been established. Both are
+ // protected by streamLock.
+ streams map[uint32]*Stream
+ inflight map[uint32]struct{}
+ streamLock sync.Mutex
+
+ // synCh acts like a semaphore. It is sized to the AcceptBacklog which
+ // is assumed to be symmetric between the client and server. This allows
+ // the client to avoid exceeding the backlog and instead blocks the open.
+ synCh chan struct{}
+
+ // acceptCh is used to pass ready streams to the client
+ acceptCh chan *Stream
+
+ // sendCh is used to mark a stream as ready to send,
+ // or to send a header out directly.
+ sendCh chan sendReady
+
+ // recvDoneCh is closed when recv() exits to avoid a race
+ // between stream registration and stream shutdown
+ recvDoneCh chan struct{}
+
+ // shutdown is used to safely close a session
+ shutdown bool
+ shutdownErr error
+ shutdownCh chan struct{}
+ shutdownLock sync.Mutex
+}
+
+// sendReady is used to either mark a stream as ready
+// or to directly send a header
+type sendReady struct {
+ Hdr []byte
+ Body io.Reader
+ Err chan error
+}
+
+// newSession is used to construct a new session
+func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
+ s := &Session{
+ config: config,
+ logger: log.New(config.LogOutput, "", log.LstdFlags),
+ conn: conn,
+ bufRead: bufio.NewReader(conn),
+ pings: make(map[uint32]chan struct{}),
+ streams: make(map[uint32]*Stream),
+ inflight: make(map[uint32]struct{}),
+ synCh: make(chan struct{}, config.AcceptBacklog),
+ acceptCh: make(chan *Stream, config.AcceptBacklog),
+ sendCh: make(chan sendReady, 64),
+ recvDoneCh: make(chan struct{}),
+ shutdownCh: make(chan struct{}),
+ }
+ if client {
+ s.nextStreamID = 1
+ } else {
+ s.nextStreamID = 2
+ }
+ go s.recv()
+ go s.send()
+ if config.EnableKeepAlive {
+ go s.keepalive()
+ }
+ return s
+}
+
+// IsClosed does a safe check to see if we have shutdown
+func (s *Session) IsClosed() bool {
+ select {
+ case <-s.shutdownCh:
+ return true
+ default:
+ return false
+ }
+}
+
+// NumStreams returns the number of currently open streams
+func (s *Session) NumStreams() int {
+ s.streamLock.Lock()
+ num := len(s.streams)
+ s.streamLock.Unlock()
+ return num
+}
+
+// Open is used to create a new stream as a net.Conn
+func (s *Session) Open() (net.Conn, error) {
+ conn, err := s.OpenStream()
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+}
+
+// OpenStream is used to create a new stream
+func (s *Session) OpenStream() (*Stream, error) {
+ if s.IsClosed() {
+ return nil, ErrSessionShutdown
+ }
+ if atomic.LoadInt32(&s.remoteGoAway) == 1 {
+ return nil, ErrRemoteGoAway
+ }
+
+ // Block if we have too many inflight SYNs
+ select {
+ case s.synCh <- struct{}{}:
+ case <-s.shutdownCh:
+ return nil, ErrSessionShutdown
+ }
+
+GET_ID:
+ // Get an ID, and check for stream exhaustion
+ id := atomic.LoadUint32(&s.nextStreamID)
+ if id >= math.MaxUint32-1 {
+ return nil, ErrStreamsExhausted
+ }
+ if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) {
+ goto GET_ID
+ }
+
+ // Register the stream
+ stream := newStream(s, id, streamInit)
+ s.streamLock.Lock()
+ s.streams[id] = stream
+ s.inflight[id] = struct{}{}
+ s.streamLock.Unlock()
+
+ // Send the window update to create
+ if err := stream.sendWindowUpdate(); err != nil {
+ select {
+ case <-s.synCh:
+ default:
+ s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore")
+ }
+ return nil, err
+ }
+ return stream, nil
+}
+
+// Accept is used to block until the next available stream
+// is ready to be accepted.
+func (s *Session) Accept() (net.Conn, error) {
+ conn, err := s.AcceptStream()
+ if err != nil {
+ return nil, err
+ }
+ return conn, err
+}
+
+// AcceptStream is used to block until the next available stream
+// is ready to be accepted.
+func (s *Session) AcceptStream() (*Stream, error) {
+ select {
+ case stream := <-s.acceptCh:
+ if err := stream.sendWindowUpdate(); err != nil {
+ return nil, err
+ }
+ return stream, nil
+ case <-s.shutdownCh:
+ return nil, s.shutdownErr
+ }
+}
+
+// Close is used to close the session and all streams. The underlying
+// connection is closed and all remaining streams are force-closed.
+func (s *Session) Close() error {
+ s.shutdownLock.Lock()
+ defer s.shutdownLock.Unlock()
+
+ if s.shutdown {
+ return nil
+ }
+ s.shutdown = true
+ if s.shutdownErr == nil {
+ s.shutdownErr = ErrSessionShutdown
+ }
+ close(s.shutdownCh)
+ s.conn.Close()
+ <-s.recvDoneCh
+
+ s.streamLock.Lock()
+ defer s.streamLock.Unlock()
+ for _, stream := range s.streams {
+ stream.forceClose()
+ }
+ return nil
+}
+
+// exitErr is used to handle an error that is causing the
+// session to terminate.
+func (s *Session) exitErr(err error) {
+ s.shutdownLock.Lock()
+ if s.shutdownErr == nil {
+ s.shutdownErr = err
+ }
+ s.shutdownLock.Unlock()
+ s.Close()
+}
+
+// GoAway can be used to prevent accepting further
+// connections. It does not close the underlying conn.
+func (s *Session) GoAway() error {
+ return s.waitForSend(s.goAway(goAwayNormal), nil)
+}
+
+// goAway is used to send a goAway message
+func (s *Session) goAway(reason uint32) header {
+ atomic.SwapInt32(&s.localGoAway, 1)
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typeGoAway, 0, 0, reason)
+ return hdr
+}
+
+// Ping is used to measure the RTT response time
+func (s *Session) Ping() (time.Duration, error) {
+ // Get a channel for the ping
+ ch := make(chan struct{})
+
+ // Get a new ping id, mark as pending
+ s.pingLock.Lock()
+ id := s.pingID
+ s.pingID++
+ s.pings[id] = ch
+ s.pingLock.Unlock()
+
+ // Send the ping request
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typePing, flagSYN, 0, id)
+ if err := s.waitForSend(hdr, nil); err != nil {
+ return 0, err
+ }
+
+ // Wait for a response
+ start := time.Now()
+ select {
+ case <-ch:
+ case <-time.After(s.config.ConnectionWriteTimeout):
+ s.pingLock.Lock()
+ delete(s.pings, id) // Ignore it if a response comes later.
+ s.pingLock.Unlock()
+ return 0, ErrTimeout
+ case <-s.shutdownCh:
+ return 0, ErrSessionShutdown
+ }
+
+ // Compute the RTT
+ return time.Now().Sub(start), nil
+}
+
+// keepalive is a long running goroutine that periodically does
+// a ping to keep the connection alive.
+func (s *Session) keepalive() {
+ for {
+ select {
+ case <-time.After(s.config.KeepAliveInterval):
+ _, err := s.Ping()
+ if err != nil {
+ s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
+ s.exitErr(ErrKeepAliveTimeout)
+ return
+ }
+ case <-s.shutdownCh:
+ return
+ }
+ }
+}
+
+// waitForSend waits to send a header, checking for a potential shutdown
+func (s *Session) waitForSend(hdr header, body io.Reader) error {
+ errCh := make(chan error, 1)
+ return s.waitForSendErr(hdr, body, errCh)
+}
+
+// waitForSendErr waits to send a header with optional data, checking for a
+// potential shutdown. Since there's the expectation that sends can happen
+// in a timely manner, we enforce the connection write timeout here.
+func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error {
+ timer := time.NewTimer(s.config.ConnectionWriteTimeout)
+ defer timer.Stop()
+
+ ready := sendReady{Hdr: hdr, Body: body, Err: errCh}
+ select {
+ case s.sendCh <- ready:
+ case <-s.shutdownCh:
+ return ErrSessionShutdown
+ case <-timer.C:
+ return ErrConnectionWriteTimeout
+ }
+
+ select {
+ case err := <-errCh:
+ return err
+ case <-s.shutdownCh:
+ return ErrSessionShutdown
+ case <-timer.C:
+ return ErrConnectionWriteTimeout
+ }
+}
+
+// sendNoWait does a send without waiting. Since there's the expectation that
+// the send happens right here, we enforce the connection write timeout if we
+// can't queue the header to be sent.
+func (s *Session) sendNoWait(hdr header) error {
+ timer := time.NewTimer(s.config.ConnectionWriteTimeout)
+ defer timer.Stop()
+
+ select {
+ case s.sendCh <- sendReady{Hdr: hdr}:
+ return nil
+ case <-s.shutdownCh:
+ return ErrSessionShutdown
+ case <-timer.C:
+ return ErrConnectionWriteTimeout
+ }
+}
+
+// send is a long running goroutine that sends data
+func (s *Session) send() {
+ for {
+ select {
+ case ready := <-s.sendCh:
+ // Send a header if ready
+ if ready.Hdr != nil {
+ sent := 0
+ for sent < len(ready.Hdr) {
+ n, err := s.conn.Write(ready.Hdr[sent:])
+ if err != nil {
+ s.logger.Printf("[ERR] yamux: Failed to write header: %v", err)
+ asyncSendErr(ready.Err, err)
+ s.exitErr(err)
+ return
+ }
+ sent += n
+ }
+ }
+
+ // Send data from a body if given
+ if ready.Body != nil {
+ _, err := io.Copy(s.conn, ready.Body)
+ if err != nil {
+ s.logger.Printf("[ERR] yamux: Failed to write body: %v", err)
+ asyncSendErr(ready.Err, err)
+ s.exitErr(err)
+ return
+ }
+ }
+
+ // No error, successful send
+ asyncSendErr(ready.Err, nil)
+ case <-s.shutdownCh:
+ return
+ }
+ }
+}
+
+// recv is a long running goroutine that accepts new data
+func (s *Session) recv() {
+ if err := s.recvLoop(); err != nil {
+ s.exitErr(err)
+ }
+}
+
+// recvLoop continues to receive data until a fatal error is encountered
+func (s *Session) recvLoop() error {
+ defer close(s.recvDoneCh)
+ hdr := header(make([]byte, headerSize))
+ var handler func(header) error
+ for {
+ // Read the header
+ if _, err := io.ReadFull(s.bufRead, hdr); err != nil {
+ if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") {
+ s.logger.Printf("[ERR] yamux: Failed to read header: %v", err)
+ }
+ return err
+ }
+
+ // Verify the version
+ if hdr.Version() != protoVersion {
+ s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version())
+ return ErrInvalidVersion
+ }
+
+ // Switch on the type
+ switch hdr.MsgType() {
+ case typeData:
+ handler = s.handleStreamMessage
+ case typeWindowUpdate:
+ handler = s.handleStreamMessage
+ case typeGoAway:
+ handler = s.handleGoAway
+ case typePing:
+ handler = s.handlePing
+ default:
+ return ErrInvalidMsgType
+ }
+
+ // Invoke the handler
+ if err := handler(hdr); err != nil {
+ return err
+ }
+ }
+}
+
+// handleStreamMessage handles either a data or window update frame
+func (s *Session) handleStreamMessage(hdr header) error {
+ // Check for a new stream creation
+ id := hdr.StreamID()
+ flags := hdr.Flags()
+ if flags&flagSYN == flagSYN {
+ if err := s.incomingStream(id); err != nil {
+ return err
+ }
+ }
+
+ // Get the stream
+ s.streamLock.Lock()
+ stream := s.streams[id]
+ s.streamLock.Unlock()
+
+ // If we do not have a stream, likely we sent a RST
+ if stream == nil {
+ // Drain any data on the wire
+ if hdr.MsgType() == typeData && hdr.Length() > 0 {
+ s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id)
+ if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil {
+ s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err)
+ return nil
+ }
+ } else {
+ s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr)
+ }
+ return nil
+ }
+
+ // Check if this is a window update
+ if hdr.MsgType() == typeWindowUpdate {
+ if err := stream.incrSendWindow(hdr, flags); err != nil {
+ if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+ s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+ }
+ return err
+ }
+ return nil
+ }
+
+ // Read the new data
+ if err := stream.readData(hdr, flags, s.bufRead); err != nil {
+ if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+ s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+ }
+ return err
+ }
+ return nil
+}
+
+// handlePing is invoked for a typePing frame
+func (s *Session) handlePing(hdr header) error {
+ flags := hdr.Flags()
+ pingID := hdr.Length()
+
+ // Check if this is a query; respond in a separate goroutine so we
+ // don't block the receive loop on the write.
+ if flags&flagSYN == flagSYN {
+ go func() {
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typePing, flagACK, 0, pingID)
+ if err := s.sendNoWait(hdr); err != nil {
+ s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err)
+ }
+ }()
+ return nil
+ }
+
+ // Handle a response
+ s.pingLock.Lock()
+ ch := s.pings[pingID]
+ if ch != nil {
+ delete(s.pings, pingID)
+ close(ch)
+ }
+ s.pingLock.Unlock()
+ return nil
+}
+
+// handleGoAway is invoked for a typeGoAway frame
+func (s *Session) handleGoAway(hdr header) error {
+ code := hdr.Length()
+ switch code {
+ case goAwayNormal:
+ atomic.SwapInt32(&s.remoteGoAway, 1)
+ case goAwayProtoErr:
+ s.logger.Printf("[ERR] yamux: received protocol error go away")
+ return fmt.Errorf("yamux protocol error")
+ case goAwayInternalErr:
+ s.logger.Printf("[ERR] yamux: received internal error go away")
+ return fmt.Errorf("remote yamux internal error")
+ default:
+ s.logger.Printf("[ERR] yamux: received unexpected go away")
+ return fmt.Errorf("unexpected go away received")
+ }
+ return nil
+}
+
+// incomingStream is used to create a new incoming stream
+func (s *Session) incomingStream(id uint32) error {
+ // Reject immediately if we are doing a go away
+ if atomic.LoadInt32(&s.localGoAway) == 1 {
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typeWindowUpdate, flagRST, id, 0)
+ return s.sendNoWait(hdr)
+ }
+
+ // Allocate a new stream
+ stream := newStream(s, id, streamSYNReceived)
+
+ s.streamLock.Lock()
+ defer s.streamLock.Unlock()
+
+ // Check if stream already exists
+ if _, ok := s.streams[id]; ok {
+ s.logger.Printf("[ERR] yamux: duplicate stream declared")
+ if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+ s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+ }
+ return ErrDuplicateStream
+ }
+
+ // Register the stream
+ s.streams[id] = stream
+
+ // Check if we've exceeded the backlog
+ select {
+ case s.acceptCh <- stream:
+ return nil
+ default:
+ // Backlog exceeded! RST the stream
+ s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset")
+ delete(s.streams, id)
+ stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0)
+ return s.sendNoWait(stream.sendHdr)
+ }
+}
+
+// closeStream is used to close a stream once both sides have
+// issued a close. If there was an in-flight SYN and the stream
+// was not yet established, then this will give the credit back.
+func (s *Session) closeStream(id uint32) {
+ s.streamLock.Lock()
+ if _, ok := s.inflight[id]; ok {
+ select {
+ case <-s.synCh:
+ default:
+ s.logger.Printf("[ERR] yamux: SYN tracking out of sync")
+ }
+ }
+ delete(s.streams, id)
+ s.streamLock.Unlock()
+}
+
+// establishStream is used to mark a stream that was in the
+// SYN Sent state as established.
+func (s *Session) establishStream(id uint32) {
+ s.streamLock.Lock()
+ if _, ok := s.inflight[id]; ok {
+ delete(s.inflight, id)
+ } else {
+ s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)")
+ }
+ select {
+ case <-s.synCh:
+ default:
+ s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)")
+ }
+ s.streamLock.Unlock()
+}
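
A sketch of the measurement and shutdown paths on an established *Session, in the same in-package style (error handling elided):

```go
func exampleSessionLifecycle(session *Session) {
	// Ping round-trips a typePing frame and reports the RTT,
	// subject to ConnectionWriteTimeout.
	if rtt, err := session.Ping(); err == nil {
		_ = rtt
	}

	// GoAway tells the peer to stop opening new streams but leaves
	// the underlying connection and existing streams alone.
	_ = session.GoAway()

	// Close shuts the session down and force-closes every
	// remaining stream.
	_ = session.Close()
}
```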
diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go
new file mode 100644
index 00000000..d216e281
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/stream.go
@@ -0,0 +1,457 @@
+package yamux
+
+import (
+ "bytes"
+ "io"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+type streamState int
+
+const (
+ streamInit streamState = iota
+ streamSYNSent
+ streamSYNReceived
+ streamEstablished
+ streamLocalClose
+ streamRemoteClose
+ streamClosed
+ streamReset
+)
+
+// Stream is used to represent a logical stream
+// within a session.
+type Stream struct {
+ recvWindow uint32
+ sendWindow uint32
+
+ id uint32
+ session *Session
+
+ state streamState
+ stateLock sync.Mutex
+
+ recvBuf *bytes.Buffer
+ recvLock sync.Mutex
+
+ controlHdr header
+ controlErr chan error
+ controlHdrLock sync.Mutex
+
+ sendHdr header
+ sendErr chan error
+ sendLock sync.Mutex
+
+ recvNotifyCh chan struct{}
+ sendNotifyCh chan struct{}
+
+ readDeadline time.Time
+ writeDeadline time.Time
+}
+
+// newStream is used to construct a new stream within
+// a given session for an ID
+func newStream(session *Session, id uint32, state streamState) *Stream {
+ s := &Stream{
+ id: id,
+ session: session,
+ state: state,
+ controlHdr: header(make([]byte, headerSize)),
+ controlErr: make(chan error, 1),
+ sendHdr: header(make([]byte, headerSize)),
+ sendErr: make(chan error, 1),
+ recvWindow: initialStreamWindow,
+ sendWindow: initialStreamWindow,
+ recvNotifyCh: make(chan struct{}, 1),
+ sendNotifyCh: make(chan struct{}, 1),
+ }
+ return s
+}
+
+// Session returns the associated stream session
+func (s *Stream) Session() *Session {
+ return s.session
+}
+
+// StreamID returns the ID of this stream
+func (s *Stream) StreamID() uint32 {
+ return s.id
+}
+
+// Read is used to read from the stream
+func (s *Stream) Read(b []byte) (n int, err error) {
+ defer asyncNotify(s.recvNotifyCh)
+START:
+ s.stateLock.Lock()
+ switch s.state {
+ case streamLocalClose:
+ fallthrough
+ case streamRemoteClose:
+ fallthrough
+ case streamClosed:
+ s.recvLock.Lock()
+ if s.recvBuf == nil || s.recvBuf.Len() == 0 {
+ s.recvLock.Unlock()
+ s.stateLock.Unlock()
+ return 0, io.EOF
+ }
+ s.recvLock.Unlock()
+ case streamReset:
+ s.stateLock.Unlock()
+ return 0, ErrConnectionReset
+ }
+ s.stateLock.Unlock()
+
+ // If there is no data available, block
+ s.recvLock.Lock()
+ if s.recvBuf == nil || s.recvBuf.Len() == 0 {
+ s.recvLock.Unlock()
+ goto WAIT
+ }
+
+ // Read any bytes
+ n, _ = s.recvBuf.Read(b)
+ s.recvLock.Unlock()
+
+ // Send a window update potentially
+ err = s.sendWindowUpdate()
+ return n, err
+
+WAIT:
+ var timeout <-chan time.Time
+ var timer *time.Timer
+ if !s.readDeadline.IsZero() {
+ delay := s.readDeadline.Sub(time.Now())
+ timer = time.NewTimer(delay)
+ timeout = timer.C
+ }
+ select {
+ case <-s.recvNotifyCh:
+ if timer != nil {
+ timer.Stop()
+ }
+ goto START
+ case <-timeout:
+ return 0, ErrTimeout
+ }
+}
+
+// Write is used to write to the stream
+func (s *Stream) Write(b []byte) (n int, err error) {
+ s.sendLock.Lock()
+ defer s.sendLock.Unlock()
+ total := 0
+ for total < len(b) {
+ n, err := s.write(b[total:])
+ total += n
+ if err != nil {
+ return total, err
+ }
+ }
+ return total, nil
+}
+
+// write is used to write to the stream; it may return after
+// a short write.
+func (s *Stream) write(b []byte) (n int, err error) {
+ var flags uint16
+ var max uint32
+ var body io.Reader
+START:
+ s.stateLock.Lock()
+ switch s.state {
+ case streamLocalClose:
+ fallthrough
+ case streamClosed:
+ s.stateLock.Unlock()
+ return 0, ErrStreamClosed
+ case streamReset:
+ s.stateLock.Unlock()
+ return 0, ErrConnectionReset
+ }
+ s.stateLock.Unlock()
+
+ // If there is no room in the send window, block
+ window := atomic.LoadUint32(&s.sendWindow)
+ if window == 0 {
+ goto WAIT
+ }
+
+ // Determine the flags if any
+ flags = s.sendFlags()
+
+ // Send up to our send window
+ max = min(window, uint32(len(b)))
+ body = bytes.NewReader(b[:max])
+
+ // Send the header
+ s.sendHdr.encode(typeData, flags, s.id, max)
+ if err := s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
+ return 0, err
+ }
+
+ // Reduce our send window
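+ // ^uint32(max-1) is the two's complement of max, so AddUint32
+ // atomically subtracts max from the window.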
+ atomic.AddUint32(&s.sendWindow, ^uint32(max-1))
+
+ return int(max), err
+
+WAIT:
+ var timeout <-chan time.Time
+ var timer *time.Timer
+ if !s.writeDeadline.IsZero() {
+ delay := s.writeDeadline.Sub(time.Now())
+ timer = time.NewTimer(delay)
+ timeout = timer.C
+ }
+ select {
+ case <-s.sendNotifyCh:
+ if timer != nil {
+ timer.Stop()
+ }
+ goto START
+ case <-timeout:
+ return 0, ErrTimeout
+ }
+}
+
+// sendFlags determines any flags that are appropriate
+// based on the current stream state
+func (s *Stream) sendFlags() uint16 {
+ s.stateLock.Lock()
+ defer s.stateLock.Unlock()
+ var flags uint16
+ switch s.state {
+ case streamInit:
+ flags |= flagSYN
+ s.state = streamSYNSent
+ case streamSYNReceived:
+ flags |= flagACK
+ s.state = streamEstablished
+ }
+ return flags
+}
+
+// sendWindowUpdate potentially sends a window update, enabling
+// the remote side to write further data. It acquires the control
+// header lock itself, so the caller need not hold any lock.
+func (s *Stream) sendWindowUpdate() error {
+ s.controlHdrLock.Lock()
+ defer s.controlHdrLock.Unlock()
+
+ // Determine the delta update
+ max := s.session.config.MaxStreamWindowSize
+ delta := max - atomic.LoadUint32(&s.recvWindow)
+
+ // Determine the flags if any
+ flags := s.sendFlags()
+
+ // Check if we can omit the update
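+ // Updates are batched: none is sent until at least half of the
+ // maximum window has been consumed, unless flags must be flushed.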
+ if delta < (max/2) && flags == 0 {
+ return nil
+ }
+
+ // Update our window
+ atomic.AddUint32(&s.recvWindow, delta)
+
+ // Send the header
+ s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)
+ if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
+ return err
+ }
+ return nil
+}
+
+// sendClose is used to send a FIN
+func (s *Stream) sendClose() error {
+ s.controlHdrLock.Lock()
+ defer s.controlHdrLock.Unlock()
+
+ flags := s.sendFlags()
+ flags |= flagFIN
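+ // The FIN is piggybacked on a zero-delta window update frame;
+ // yamux has no dedicated close frame type.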
+ s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0)
+ if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Close is used to close the stream
+func (s *Stream) Close() error {
+ closeStream := false
+ s.stateLock.Lock()
+ switch s.state {
+ // Opened means we need to signal a close
+ case streamSYNSent:
+ fallthrough
+ case streamSYNReceived:
+ fallthrough
+ case streamEstablished:
+ s.state = streamLocalClose
+ goto SEND_CLOSE
+
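+ // A repeated local close is a no-op; once the remote side has
+ // also closed, the stream becomes fully closed.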
+ case streamLocalClose:
+ case streamRemoteClose:
+ s.state = streamClosed
+ closeStream = true
+ goto SEND_CLOSE
+
+ case streamClosed:
+ case streamReset:
+ default:
+ panic("unhandled state")
+ }
+ s.stateLock.Unlock()
+ return nil
+SEND_CLOSE:
+ s.stateLock.Unlock()
+ s.sendClose()
+ s.notifyWaiting()
+ if closeStream {
+ s.session.closeStream(s.id)
+ }
+ return nil
+}
+
+// forceClose is used when the session is exiting
+func (s *Stream) forceClose() {
+ s.stateLock.Lock()
+ s.state = streamClosed
+ s.stateLock.Unlock()
+ s.notifyWaiting()
+}
+
+// processFlags is used to update the state of the stream
+// based on set flags, if any. It acquires the state lock itself.
+func (s *Stream) processFlags(flags uint16) error {
+ // Close the stream without holding the state lock
+ closeStream := false
+ defer func() {
+ if closeStream {
+ s.session.closeStream(s.id)
+ }
+ }()
+
+ s.stateLock.Lock()
+ defer s.stateLock.Unlock()
+ if flags&flagACK == flagACK {
+ if s.state == streamSYNSent {
+ s.state = streamEstablished
+ }
+ s.session.establishStream(s.id)
+ }
+ if flags&flagFIN == flagFIN {
+ switch s.state {
+ case streamSYNSent:
+ fallthrough
+ case streamSYNReceived:
+ fallthrough
+ case streamEstablished:
+ s.state = streamRemoteClose
+ s.notifyWaiting()
+ case streamLocalClose:
+ s.state = streamClosed
+ closeStream = true
+ s.notifyWaiting()
+ default:
+ s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state)
+ return ErrUnexpectedFlag
+ }
+ }
+ if flags&flagRST == flagRST {
+ s.state = streamReset
+ closeStream = true
+ s.notifyWaiting()
+ }
+ return nil
+}
+
+// notifyWaiting notifies all the waiting channels
+func (s *Stream) notifyWaiting() {
+ asyncNotify(s.recvNotifyCh)
+ asyncNotify(s.sendNotifyCh)
+}
+
+// incrSendWindow updates the size of our send window
+func (s *Stream) incrSendWindow(hdr header, flags uint16) error {
+ if err := s.processFlags(flags); err != nil {
+ return err
+ }
+
+ // Increase window, unblock a sender
+ atomic.AddUint32(&s.sendWindow, hdr.Length())
+ asyncNotify(s.sendNotifyCh)
+ return nil
+}
+
+// readData is used to handle a data frame
+func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
+ if err := s.processFlags(flags); err != nil {
+ return err
+ }
+
+ // Check that our recv window is not exceeded
+ length := hdr.Length()
+ if length == 0 {
+ return nil
+ }
+ if remain := atomic.LoadUint32(&s.recvWindow); length > remain {
+ s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, remain, length)
+ return ErrRecvWindowExceeded
+ }
+
+ // Wrap in a limited reader
+ conn = &io.LimitedReader{R: conn, N: int64(length)}
+
+ // Copy into buffer
+ s.recvLock.Lock()
+ if s.recvBuf == nil {
+ // Allocate the receive buffer just-in-time to fit the full data frame.
+ // This way we can read in the whole packet without further allocations.
+ s.recvBuf = bytes.NewBuffer(make([]byte, 0, length))
+ }
+ if _, err := io.Copy(s.recvBuf, conn); err != nil {
+ s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err)
+ s.recvLock.Unlock()
+ return err
+ }
+
+ // Decrement the receive window
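+ // (same two's-complement subtraction trick as in write)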
+ atomic.AddUint32(&s.recvWindow, ^uint32(length-1))
+ s.recvLock.Unlock()
+
+ // Unblock any readers
+ asyncNotify(s.recvNotifyCh)
+ return nil
+}
+
+// SetDeadline sets the read and write deadlines
+func (s *Stream) SetDeadline(t time.Time) error {
+ if err := s.SetReadDeadline(t); err != nil {
+ return err
+ }
+ if err := s.SetWriteDeadline(t); err != nil {
+ return err
+ }
+ return nil
+}
+
+// SetReadDeadline sets the deadline for future Read calls.
+func (s *Stream) SetReadDeadline(t time.Time) error {
+ s.readDeadline = t
+ return nil
+}
+
+// SetWriteDeadline sets the deadline for future Write calls.
+func (s *Stream) SetWriteDeadline(t time.Time) error {
+ s.writeDeadline = t
+ return nil
+}
+
+// Shrink releases the receive buffer once it is empty. This is
+// useful when pooling idle yamux connections to reduce their
+// memory utilization.
+func (s *Stream) Shrink() {
+ s.recvLock.Lock()
+ if s.recvBuf != nil && s.recvBuf.Len() == 0 {
+ s.recvBuf = nil
+ }
+ s.recvLock.Unlock()
+}
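
For orientation, here is a minimal sketch of how the Stream methods above
are typically exercised through yamux's public entry points (Server/Client,
OpenStream/AcceptStream). The in-memory net.Pipe transport and the payload
are illustrative only:

package main

import (
	"fmt"
	"net"
	"time"

	"github.com/hashicorp/yamux"
)

func main() {
	// net.Pipe stands in for a real network connection.
	clientConn, serverConn := net.Pipe()

	go func() {
		session, err := yamux.Server(serverConn, nil)
		if err != nil {
			return
		}
		stream, err := session.AcceptStream() // returns a *yamux.Stream
		if err != nil {
			return
		}
		defer stream.Close()
		buf := make([]byte, 64)
		n, _ := stream.Read(buf) // blocks until data arrives or a deadline fires
		fmt.Printf("server read: %s\n", buf[:n])
	}()

	session, err := yamux.Client(clientConn, nil)
	if err != nil {
		panic(err)
	}
	stream, err := session.OpenStream()
	if err != nil {
		panic(err)
	}
	defer stream.Close()

	// Maps onto the readDeadline/writeDeadline fields above.
	stream.SetDeadline(time.Now().Add(5 * time.Second))
	stream.Write([]byte("hello"))
	time.Sleep(100 * time.Millisecond) // give the server goroutine time to print
}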
diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go
new file mode 100644
index 00000000..5fe45afc
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/util.go
@@ -0,0 +1,28 @@
+package yamux
+
+// asyncSendErr is used to attempt a non-blocking send of an error
+func asyncSendErr(ch chan error, err error) {
+ if ch == nil {
+ return
+ }
+ select {
+ case ch <- err:
+ default:
+ }
+}
+
+// asyncNotify is used to signal a waiting goroutine
+func asyncNotify(ch chan struct{}) {
+ select {
+ case ch <- struct{}{}:
+ default:
+ }
+}
+
+// min computes the minimum of two values
+func min(a, b uint32) uint32 {
+ if a < b {
+ return a
+ }
+ return b
+}
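
The asyncNotify/asyncSendErr helpers above depend on their channels being
buffered with capacity 1: a send never blocks, and repeated notifications
coalesce into a single pending signal. A standalone sketch of the same
pattern (the names here are illustrative, not part of yamux):

package main

import "fmt"

// notify mirrors asyncNotify: a non-blocking send on a 1-buffered channel.
func notify(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default: // a signal is already pending; coalesce
	}
}

func main() {
	ch := make(chan struct{}, 1)

	// Three notifications collapse into one pending signal.
	notify(ch)
	notify(ch)
	notify(ch)

	<-ch // consumes the single coalesced signal
	select {
	case <-ch:
		fmt.Println("unexpected second signal")
	default:
		fmt.Println("signals were coalesced") // printed
	}
}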