author    Flavio Castelli <fcastelli@suse.com>    2017-05-03 11:37:08 +0200
committer Alvaro <alvaro.saurin@gmail.com>        2017-05-03 11:40:31 +0200
commit    ee12004ab93e54f326896e9909ba9e6a2bd11e89 (patch)
tree      1ea30d204b04425ebd1dadaf8cc991d572c7f0fb /vendor
parent    a286dc5494691c2b04c48ef6695ed0c902912c0f (diff)
Vendor dependencies with vndr
This fixes issue #123
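
For reference: vndr (github.com/LK4D4/vndr) repopulates the vendor/ tree from a vendor.conf file at the repository root, one dependency per line in the form "import path, revision, optional alternate repository URL". A hypothetical sketch of such entries (the revisions here are placeholders, not the ones pinned by this commit):

    github.com/apparentlymart/go-cidr <commit-or-tag>
    github.com/hashicorp/terraform <commit-or-tag>
    github.com/dmacvicar/libvirt-go <commit-or-tag> https://github.com/dmacvicar/libvirt-go.git

Running vndr then re-fetches each listed dependency into vendor/ at its pinned revision, producing a tree like the one below.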
Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/apparentlymart/go-cidr/LICENSE | 19
-rw-r--r--  vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go | 112
-rw-r--r--  vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go | 38
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/README.md | 449
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go | 145
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go | 194
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go | 108
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go | 27
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go | 222
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go | 113
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go | 89
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/client/client.go | 149
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go | 96
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go | 12
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/config.go | 470
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/context.go | 71
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go | 41
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go | 9
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/convert_types.go | 369
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go | 226
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go | 17
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go | 102
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go | 246
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go | 178
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go | 191
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go | 78
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go | 151
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go | 57
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go | 298
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go | 163
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go | 162
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go | 124
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go | 133
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go | 2149
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go | 66
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go | 439
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go | 303
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go | 337
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/errors.go | 17
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go | 12
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/logger.go | 112
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go | 19
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_appengine.go | 11
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go | 225
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go | 24
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go | 58
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/request.go | 575
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go | 21
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go | 9
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go | 14
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go | 14
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go | 236
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go | 154
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go | 94
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/validation.go | 234
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go | 287
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/session/doc.go | 274
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go | 208
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/session/session.go | 590
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go | 295
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go | 82
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go | 7
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go | 24
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go | 761
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/types.go | 118
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/url.go | 12
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go | 29
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/version.go | 8
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/README.md | 4
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go | 75
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go | 36
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go | 237
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go | 35
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go | 66
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go | 290
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go | 45
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go | 227
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go | 69
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go | 21
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go | 296
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go | 260
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go | 147
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/api.go | 19245
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go | 106
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go | 36
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go | 46
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/doc.go | 78
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go | 109
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/errors.go | 48
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go | 162
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go | 8
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go | 28
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/service.go | 93
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/sse.go | 44
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go | 35
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go | 103
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go | 214
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/sts/api.go | 2365
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go | 12
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/sts/doc.go | 124
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/sts/errors.go | 73
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/sts/service.go | 93
-rw-r--r--  vendor/github.com/bgentry/go-netrc/LICENSE | 20
-rw-r--r--  vendor/github.com/bgentry/go-netrc/README.md | 9
-rw-r--r--  vendor/github.com/bgentry/go-netrc/netrc/netrc.go | 510
-rw-r--r--  vendor/github.com/c4milo/gotoolkit/LICENSE | 374
-rw-r--r--  vendor/github.com/c4milo/gotoolkit/README.md | 12
-rw-r--r--  vendor/github.com/c4milo/gotoolkit/queue.go | 114
-rw-r--r--  vendor/github.com/c4milo/gotoolkit/stack.go | 110
-rw-r--r--  vendor/github.com/coreos/go-semver/LICENSE | 202
-rw-r--r--  vendor/github.com/coreos/go-semver/README.md | 28
-rw-r--r--  vendor/github.com/coreos/go-semver/semver/semver.go | 275
-rw-r--r--  vendor/github.com/coreos/go-semver/semver/sort.go | 38
-rw-r--r--  vendor/github.com/coreos/go-systemd/LICENSE | 191
-rw-r--r--  vendor/github.com/coreos/go-systemd/README.md | 54
-rw-r--r--  vendor/github.com/coreos/go-systemd/unit/deserialize.go | 276
-rw-r--r--  vendor/github.com/coreos/go-systemd/unit/escape.go | 116
-rw-r--r--  vendor/github.com/coreos/go-systemd/unit/option.go | 54
-rw-r--r--  vendor/github.com/coreos/go-systemd/unit/serialize.go | 75
-rw-r--r--  vendor/github.com/coreos/ignition/LICENSE | 202
-rw-r--r--  vendor/github.com/coreos/ignition/NOTICE | 5
-rw-r--r--  vendor/github.com/coreos/ignition/README.md | 12
-rw-r--r--  vendor/github.com/coreos/ignition/config/types/config.go | 80
-rw-r--r--  vendor/github.com/coreos/ignition/config/types/directory.go | 30
-rw-r--r--  vendor/github.com/coreos/ignition/config/types/disk.go | 126
-rw-r--r--  vendor/github.com/coreos/ignition/config/types/file.go | 62
-rw-r--r--  vendor/github.com/coreos/ignition/config/types/filesystem.go | 74
-rw-r--r--  vendor/github.com/coreos/ignition/config/types/ignition.go | 59
-rw-r--r--  vendor/github.com/coreos/ignition/config/types/link.go | 35
-rw-r--r--  vendor/github.com/coreos/ignition/config/types/mode.go | 30
-rw-r--r--  vendor/github.com/coreos/ignition/config/types/node.go | 56
-rw-r--r--  vendor/github.com/coreos/ignition/config/types/partition.go | 73
-rw-r--r--  vendor/github.com/coreos/ignition/config/types/path.go | 31
-rw-r--r--  vendor/github.com/coreos/ignition/config/types/raid.go | 58
-rw-r--r--  vendor/github.com/coreos/ignition/config/types/schema.go | 200
-rw-r--r--  vendor/github.com/coreos/ignition/config/types/unit.go | 109
-rw-r--r--  vendor/github.com/coreos/ignition/config/types/url.go | 49
-rw-r--r--  vendor/github.com/coreos/ignition/config/types/verification.go | 83
-rw-r--r--  vendor/github.com/coreos/ignition/config/validate/report/report.go | 158
-rw-r--r--  vendor/github.com/coreos/ignition/internal/exec/util/device_alias.go | 55
-rw-r--r--  vendor/github.com/coreos/ignition/internal/exec/util/file.go | 235
-rw-r--r--  vendor/github.com/coreos/ignition/internal/exec/util/passwd.go | 191
-rw-r--r--  vendor/github.com/coreos/ignition/internal/exec/util/path.go | 31
-rw-r--r--  vendor/github.com/coreos/ignition/internal/exec/util/unit.go | 80
-rw-r--r--  vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.c | 139
-rw-r--r--  vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.go | 50
-rw-r--r--  vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.h | 23
-rw-r--r--  vendor/github.com/coreos/ignition/internal/exec/util/util.go | 32
-rw-r--r--  vendor/github.com/coreos/ignition/internal/exec/util/verification.go | 81
-rw-r--r--  vendor/github.com/davecgh/go-spew/LICENSE | 15
-rw-r--r--  vendor/github.com/davecgh/go-spew/README.md | 205
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/bypass.go | 152
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/bypasssafe.go | 38
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/common.go | 341
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/config.go | 306
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/doc.go | 211
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/dump.go | 509
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/format.go | 419
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/spew.go | 148
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/LICENSE | 21
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/README.md | 49
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/cfuncs.go | 109
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/constants.go | 473
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/domain.go | 784
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/error.go | 504
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/events.go | 467
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/interface.go | 87
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/libvirt.go | 811
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/network.go | 187
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/nodeinfo.go | 50
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/nwfilter.go | 70
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/secret.go | 78
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/snapshot.go | 95
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/storage_pool.go | 210
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/storage_volume.go | 144
-rw-r--r--  vendor/github.com/dmacvicar/libvirt-go/stream.go | 79
-rw-r--r--  vendor/github.com/go-ini/ini/LICENSE | 191
-rw-r--r--  vendor/github.com/go-ini/ini/README.md | 740
-rw-r--r--  vendor/github.com/go-ini/ini/README_ZH.md | 727
-rw-r--r--  vendor/github.com/go-ini/ini/error.go | 32
-rw-r--r--  vendor/github.com/go-ini/ini/ini.go | 556
-rw-r--r--  vendor/github.com/go-ini/ini/key.go | 699
-rw-r--r--  vendor/github.com/go-ini/ini/parser.go | 361
-rw-r--r--  vendor/github.com/go-ini/ini/section.go | 248
-rw-r--r--  vendor/github.com/go-ini/ini/struct.go | 450
-rw-r--r--  vendor/github.com/hashicorp/errwrap/LICENSE | 354
-rw-r--r--  vendor/github.com/hashicorp/errwrap/README.md | 89
-rw-r--r--  vendor/github.com/hashicorp/errwrap/errwrap.go | 169
-rw-r--r--  vendor/github.com/hashicorp/go-getter/LICENSE | 354
-rw-r--r--  vendor/github.com/hashicorp/go-getter/README.md | 253
-rw-r--r--  vendor/github.com/hashicorp/go-getter/client.go | 335
-rw-r--r--  vendor/github.com/hashicorp/go-getter/client_mode.go | 24
-rw-r--r--  vendor/github.com/hashicorp/go-getter/copy_dir.go | 78
-rw-r--r--  vendor/github.com/hashicorp/go-getter/decompress.go | 29
-rw-r--r--  vendor/github.com/hashicorp/go-getter/decompress_bzip2.go | 45
-rw-r--r--  vendor/github.com/hashicorp/go-getter/decompress_gzip.go | 49
-rw-r--r--  vendor/github.com/hashicorp/go-getter/decompress_tar.go | 83
-rw-r--r--  vendor/github.com/hashicorp/go-getter/decompress_tbz2.go | 33
-rw-r--r--  vendor/github.com/hashicorp/go-getter/decompress_testing.go | 135
-rw-r--r--  vendor/github.com/hashicorp/go-getter/decompress_tgz.go | 39
-rw-r--r--  vendor/github.com/hashicorp/go-getter/decompress_zip.go | 96
-rw-r--r--  vendor/github.com/hashicorp/go-getter/detect.go | 97
-rw-r--r--  vendor/github.com/hashicorp/go-getter/detect_bitbucket.go | 66
-rw-r--r--  vendor/github.com/hashicorp/go-getter/detect_file.go | 67
-rw-r--r--  vendor/github.com/hashicorp/go-getter/detect_github.go | 73
-rw-r--r--  vendor/github.com/hashicorp/go-getter/detect_s3.go | 61
-rw-r--r--  vendor/github.com/hashicorp/go-getter/folder_storage.go | 65
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get.go | 139
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get_file.go | 32
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get_file_unix.go | 103
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get_file_windows.go | 120
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get_git.go | 225
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get_hg.go | 131
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get_http.go | 227
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get_mock.go | 52
-rw-r--r--  vendor/github.com/hashicorp/go-getter/get_s3.go | 243
-rw-r--r--  vendor/github.com/hashicorp/go-getter/helper/url/url.go | 14
-rw-r--r--  vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go | 11
-rw-r--r--  vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go | 40
-rw-r--r--  vendor/github.com/hashicorp/go-getter/netrc.go | 67
-rw-r--r--  vendor/github.com/hashicorp/go-getter/source.go | 36
-rw-r--r--  vendor/github.com/hashicorp/go-getter/storage.go | 13
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/LICENSE | 353
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/README.md | 97
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/append.go | 41
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/flatten.go | 26
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/format.go | 27
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/multierror.go | 51
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/prefix.go | 37
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/LICENSE | 353
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/README.md | 160
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/client.go | 666
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/discover.go | 28
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/error.go | 24
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/mux_broker.go | 204
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/plugin.go | 25
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/process.go | 24
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/process_posix.go | 19
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/process_windows.go | 29
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/rpc_client.go | 123
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/rpc_server.go | 185
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/server.go | 235
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/server_mux.go | 31
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/stream.go | 18
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/testing.go | 76
-rw-r--r--  vendor/github.com/hashicorp/go-uuid/LICENSE | 363
-rw-r--r--  vendor/github.com/hashicorp/go-uuid/README.md | 8
-rw-r--r--  vendor/github.com/hashicorp/go-uuid/uuid.go | 65
-rw-r--r--  vendor/github.com/hashicorp/go-version/LICENSE | 354
-rw-r--r--  vendor/github.com/hashicorp/go-version/README.md | 65
-rw-r--r--  vendor/github.com/hashicorp/go-version/constraint.go | 178
-rw-r--r--  vendor/github.com/hashicorp/go-version/version.go | 322
-rw-r--r--  vendor/github.com/hashicorp/go-version/version_collection.go | 17
-rw-r--r--  vendor/github.com/hashicorp/hcl/LICENSE | 354
-rw-r--r--  vendor/github.com/hashicorp/hcl/README.md | 125
-rw-r--r--  vendor/github.com/hashicorp/hcl/decoder.go | 724
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl.go | 11
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/ast/ast.go | 219
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/ast/walk.go | 52
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/parser/error.go | 17
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/parser/parser.go | 514
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go | 651
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go | 241
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/token/position.go | 46
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/token/token.go | 219
-rw-r--r--  vendor/github.com/hashicorp/hcl/json/parser/flatten.go | 117
-rw-r--r--  vendor/github.com/hashicorp/hcl/json/parser/parser.go | 313
-rw-r--r--  vendor/github.com/hashicorp/hcl/json/scanner/scanner.go | 451
-rw-r--r--  vendor/github.com/hashicorp/hcl/json/token/position.go | 46
-rw-r--r--  vendor/github.com/hashicorp/hcl/json/token/token.go | 118
-rw-r--r--  vendor/github.com/hashicorp/hcl/lex.go | 38
-rw-r--r--  vendor/github.com/hashicorp/hcl/parse.go | 39
-rw-r--r--  vendor/github.com/hashicorp/hil/LICENSE | 353
-rw-r--r--  vendor/github.com/hashicorp/hil/README.md | 102
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/arithmetic.go | 43
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/arithmetic_op.go | 24
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/ast.go | 99
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/call.go | 47
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/conditional.go | 36
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/index.go | 76
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/literal.go | 88
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/output.go | 78
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/scope.go | 90
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/stack.go | 25
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/type_string.go | 54
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/unknown.go | 30
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/variable_access.go | 36
-rw-r--r--  vendor/github.com/hashicorp/hil/ast/variables_helper.go | 63
-rw-r--r--  vendor/github.com/hashicorp/hil/builtins.go | 331
-rw-r--r--  vendor/github.com/hashicorp/hil/check_identifier.go | 88
-rw-r--r--  vendor/github.com/hashicorp/hil/check_types.go | 662
-rw-r--r--  vendor/github.com/hashicorp/hil/convert.go | 159
-rw-r--r--  vendor/github.com/hashicorp/hil/eval.go | 472
-rw-r--r--  vendor/github.com/hashicorp/hil/eval_type.go | 16
-rw-r--r--  vendor/github.com/hashicorp/hil/evaltype_string.go | 42
-rw-r--r--  vendor/github.com/hashicorp/hil/parse.go | 29
-rw-r--r--  vendor/github.com/hashicorp/hil/parser/binary_op.go | 45
-rw-r--r--  vendor/github.com/hashicorp/hil/parser/error.go | 38
-rw-r--r--  vendor/github.com/hashicorp/hil/parser/fuzz.go | 28
-rw-r--r--  vendor/github.com/hashicorp/hil/parser/parser.go | 522
-rw-r--r--  vendor/github.com/hashicorp/hil/scanner/peeker.go | 55
-rw-r--r--  vendor/github.com/hashicorp/hil/scanner/scanner.go | 550
-rw-r--r--  vendor/github.com/hashicorp/hil/scanner/token.go | 105
-rw-r--r--  vendor/github.com/hashicorp/hil/scanner/tokentype_string.go | 51
-rw-r--r--  vendor/github.com/hashicorp/hil/transform_fixed.go | 29
-rw-r--r--  vendor/github.com/hashicorp/hil/walk.go | 266
-rw-r--r--  vendor/github.com/hashicorp/logutils/LICENSE | 354
-rw-r--r--  vendor/github.com/hashicorp/logutils/README.md | 36
-rw-r--r--  vendor/github.com/hashicorp/logutils/level.go | 81
-rw-r--r--  vendor/github.com/hashicorp/terraform/LICENSE | 354
-rw-r--r--  vendor/github.com/hashicorp/terraform/README.md | 164
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/provider.go | 239
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_config.go | 308
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_disk.go | 99
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_file.go | 178
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_filesystem.go | 122
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_group.go | 57
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_networkd_unit.go | 60
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_raid.go | 69
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_systemd_unit.go | 104
-rw-r--r--  vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_user.go | 126
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/append.go | 86
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/config.go | 1096
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/config_string.go | 338
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/config_terraform.go | 117
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/config_tree.go | 43
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/import_tree.go | 113
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/interpolate.go | 386
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go | 1346
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/interpolate_walk.go | 283
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/lang.go | 11
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/loader.go | 224
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/loader_hcl.go | 1091
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/merge.go | 193
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/copy_dir.go | 114
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/get.go | 71
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/inode.go | 21
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go | 21
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/inode_windows.go | 8
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/module.go | 7
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/testing.go | 38
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/tree.go | 428
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/tree_gob.go | 57
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go | 118
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/provisioner_enums.go | 40
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/raw_config.go | 335
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/resource_mode.go | 9
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/resource_mode_string.go | 16
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/testing.go | 15
-rw-r--r--  vendor/github.com/hashicorp/terraform/dag/dag.go | 286
-rw-r--r--  vendor/github.com/hashicorp/terraform/dag/dot.go | 282
-rw-r--r--  vendor/github.com/hashicorp/terraform/dag/edge.go | 37
-rw-r--r--  vendor/github.com/hashicorp/terraform/dag/graph.go | 391
-rw-r--r--  vendor/github.com/hashicorp/terraform/dag/marshal.go | 462
-rw-r--r--  vendor/github.com/hashicorp/terraform/dag/set.go | 109
-rw-r--r--  vendor/github.com/hashicorp/terraform/dag/tarjan.go | 107
-rw-r--r--  vendor/github.com/hashicorp/terraform/dag/walk.go | 445
-rw-r--r--  vendor/github.com/hashicorp/terraform/flatmap/expand.go | 147
-rw-r--r--  vendor/github.com/hashicorp/terraform/flatmap/flatten.go | 71
-rw-r--r--  vendor/github.com/hashicorp/terraform/flatmap/map.go | 82
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/README.md | 7
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/config/decode.go | 28
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/config/validator.go | 214
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go | 154
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/experiment/id.go | 34
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go | 22
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go | 41
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/logging/logging.go | 100
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/error.go | 79
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/id.go | 39
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/map.go | 140
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/resource.go | 49
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/state.go | 259
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/testing.go | 790
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go | 160
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go | 141
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/wait.go | 84
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/README.md | 11
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/backend.go | 94
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go | 59
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/equal.go | 6
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go | 334
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go | 333
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go | 208
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go | 232
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go | 63
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go | 8
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go | 319
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go | 36
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/provider.go | 400
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go | 180
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource.go | 478
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go | 502
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go | 17
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go | 52
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go | 237
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/schema.go | 1537
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/serialize.go | 122
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/set.go | 209
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/testing.go | 30
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go | 21
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go | 16
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/shadow/closer.go | 80
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go | 128
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go | 151
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go | 66
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/shadow/value.go | 79
-rw-r--r--  vendor/github.com/hashicorp/terraform/plugin/plugin.go | 13
-rw-r--r--  vendor/github.com/hashicorp/terraform/plugin/resource_provider.go | 578
-rw-r--r--  vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go | 173
-rw-r--r--  vendor/github.com/hashicorp/terraform/plugin/serve.go | 54
-rw-r--r--  vendor/github.com/hashicorp/terraform/plugin/ui_input.go | 51
-rw-r--r--  vendor/github.com/hashicorp/terraform/plugin/ui_output.go | 29
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/context.go | 1022
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/context_components.go | 65
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go | 32
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/context_import.go | 77
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/debug.go | 523
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/diff.go | 866
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go | 17
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval.go | 63
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_apply.go | 359
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go | 38
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_context.go | 84
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go | 347
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go | 208
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_count.go | 58
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go | 78
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go | 25
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_diff.go | 478
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_error.go | 20
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_filter.go | 25
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go | 49
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_if.go | 26
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go | 76
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go | 24
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_noop.go | 8
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_output.go | 119
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_provider.go | 164
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go | 47
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go | 139
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go | 55
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_resource.go | 13
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go | 27
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_state.go | 324
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_validate.go | 227
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go | 74
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_variable.go | 279
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go | 119
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph.go | 172
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder.go | 77
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go | 141
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go | 67
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go | 76
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go | 27
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go | 161
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go | 132
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go | 36
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_dot.go | 9
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go | 7
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_walk.go | 60
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go | 157
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go | 18
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go | 16
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/hook.go | 137
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/hook_mock.go | 245
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/hook_stop.go | 87
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/instancetype.go | 13
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go | 16
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/interpolate.go | 790
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go | 14
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go | 22
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go | 198
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go | 29
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go | 125
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_output.go | 76
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go | 35
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_provider.go | 11
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go | 85
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go | 38
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go | 44
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go | 240
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go | 50
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go | 357
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go | 288
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go | 83
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go | 53
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go | 190
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go | 54
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go | 100
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go | 158
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go | 22
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/path.go | 24
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/plan.go | 153
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource.go | 360
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource_address.go | 301
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource_provider.go | 204
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go | 297
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go | 54
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go | 72
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/semantics.go | 132
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/shadow.go | 28
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/shadow_components.go | 273
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/shadow_context.go | 158
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go | 815
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go | 282
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state.go | 2118
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state_add.go | 374
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state_filter.go | 267
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go | 189
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go | 142
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state_v1.go | 145
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/testing.go | 19
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform.go | 52
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go | 80
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go | 78
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go | 68
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_config.go | 135
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go | 80
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go | 23
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go | 28
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go | 168
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go | 257
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go | 269
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_diff.go | 86
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_expand.go | 48
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go | 38
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go | 241
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go | 120
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go | 110
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go | 64
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go | 78
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_output.go | 59
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_provider.go | 380
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go | 50
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go | 206
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_reference.go | 321
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go | 51
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_root.go | 38
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_state.go | 65
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_targets.go | 144
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go | 20
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_variable.go | 40
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go | 44
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_input.go | 26
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go | 23
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go | 19
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_output.go | 7
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go | 9
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go | 16
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go | 15
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/util.go | 93
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/variables.go | 166
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/version.go | 31
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/version_required.go | 69
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go | 16
-rw-r--r--  vendor/github.com/hashicorp/yamux/LICENSE | 362
-rw-r--r--  vendor/github.com/hashicorp/yamux/README.md | 86
-rw-r--r--  vendor/github.com/hashicorp/yamux/addr.go | 60
-rw-r--r--  vendor/github.com/hashicorp/yamux/const.go | 157
-rw-r--r--  vendor/github.com/hashicorp/yamux/mux.go | 87
-rw-r--r--  vendor/github.com/hashicorp/yamux/session.go | 623
-rw-r--r--  vendor/github.com/hashicorp/yamux/stream.go | 457
-rw-r--r--  vendor/github.com/hashicorp/yamux/util.go | 28
-rw-r--r--  vendor/github.com/hooklift/iso9660/LICENSE | 374
-rw-r--r--  vendor/github.com/hooklift/iso9660/README.md | 25
-rw-r--r--  vendor/github.com/hooklift/iso9660/iso9660.go | 418
-rw-r--r--  vendor/github.com/hooklift/iso9660/reader.go | 251
-rw-r--r--  vendor/github.com/hooklift/iso9660/writer.go | 1
-rw-r--r--  vendor/github.com/imdario/mergo/LICENSE | 28
-rw-r--r--  vendor/github.com/imdario/mergo/README.md | 141
-rw-r--r--  vendor/github.com/imdario/mergo/doc.go | 44
-rw-r--r--  vendor/github.com/imdario/mergo/map.go | 156
-rw-r--r--  vendor/github.com/imdario/mergo/merge.go | 123
-rw-r--r--  vendor/github.com/imdario/mergo/mergo.go | 90
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/LICENSE | 13
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/README.md | 7
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/api.go | 49
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/astnodetype_string.go | 16
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/functions.go | 842
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/interpreter.go | 418
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/lexer.go | 420
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/parser.go | 603
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/toktype_string.go | 16
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/util.go | 185
-rw-r--r--  vendor/github.com/mitchellh/copystructure/LICENSE | 21
-rw-r--r--  vendor/github.com/mitchellh/copystructure/README.md | 21
-rw-r--r--  vendor/github.com/mitchellh/copystructure/copier_time.go | 15
-rw-r--r--  vendor/github.com/mitchellh/copystructure/copystructure.go | 545
-rw-r--r--  vendor/github.com/mitchellh/go-homedir/LICENSE | 21
-rw-r--r--  vendor/github.com/mitchellh/go-homedir/README.md | 14
-rw-r--r--  vendor/github.com/mitchellh/go-homedir/homedir.go | 137
-rw-r--r--  vendor/github.com/mitchellh/go-testing-interface/LICENSE | 21
-rw-r--r--  vendor/github.com/mitchellh/go-testing-interface/README.md | 52
-rw-r--r--  vendor/github.com/mitchellh/go-testing-interface/testing.go | 70
-rw-r--r--  vendor/github.com/mitchellh/hashstructure/LICENSE | 21
-rw-r--r--  vendor/github.com/mitchellh/hashstructure/README.md | 62
-rw-r--r--  vendor/github.com/mitchellh/hashstructure/hashstructure.go | 335
-rw-r--r--  vendor/github.com/mitchellh/hashstructure/include.go | 15
-rw-r--r--  vendor/github.com/mitchellh/mapstructure/LICENSE | 21
-rw-r--r--  vendor/github.com/mitchellh/mapstructure/README.md | 46
-rw-r--r--  vendor/github.com/mitchellh/mapstructure/decode_hooks.go | 158
-rw-r--r--  vendor/github.com/mitchellh/mapstructure/error.go | 50
-rw-r--r--  vendor/github.com/mitchellh/mapstructure/mapstructure.go | 828
-rw-r--r--  vendor/github.com/mitchellh/packer/LICENSE | 373
-rw-r--r--  vendor/github.com/mitchellh/packer/README.md | 110
-rw-r--r--  vendor/github.com/mitchellh/packer/common/uuid/uuid.go | 24
-rw-r--r--  vendor/github.com/mitchellh/reflectwalk/LICENSE | 21
-rw-r--r--  vendor/github.com/mitchellh/reflectwalk/README.md | 6
-rw-r--r--  vendor/github.com/mitchellh/reflectwalk/location.go | 19
-rw-r--r--  vendor/github.com/mitchellh/reflectwalk/location_string.go | 16
-rw-r--r--  vendor/github.com/mitchellh/reflectwalk/reflectwalk.go | 392
-rw-r--r--  vendor/github.com/satori/go.uuid/LICENSE | 20
-rw-r--r--  vendor/github.com/satori/go.uuid/README.md | 65
-rw-r--r--  vendor/github.com/satori/go.uuid/uuid.go | 481
-rw-r--r--  vendor/github.com/vincent-petithory/dataurl/LICENSE | 20
-rw-r--r--  vendor/github.com/vincent-petithory/dataurl/README.md | 81
-rw-r--r--  vendor/github.com/vincent-petithory/dataurl/dataurl.go | 280
-rw-r--r--  vendor/github.com/vincent-petithory/dataurl/doc.go | 28
-rw-r--r--  vendor/github.com/vincent-petithory/dataurl/lex.go | 521
-rw-r--r--  vendor/github.com/vincent-petithory/dataurl/rfc2396.go | 130
-rw-r--r--  vendor/gopkg.in/yaml.v2/LICENSE | 13
-rw-r--r--  vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31
-rw-r--r--  vendor/gopkg.in/yaml.v2/README.md | 131
-rw-r--r--  vendor/gopkg.in/yaml.v2/apic.go | 742
-rw-r--r--  vendor/gopkg.in/yaml.v2/decode.go | 682
-rw-r--r--  vendor/gopkg.in/yaml.v2/emitterc.go | 1684
-rw-r--r--  vendor/gopkg.in/yaml.v2/encode.go | 306
-rw-r--r--  vendor/gopkg.in/yaml.v2/parserc.go | 1095
-rw-r--r--  vendor/gopkg.in/yaml.v2/readerc.go | 394
-rw-r--r--  vendor/gopkg.in/yaml.v2/resolve.go | 208
-rw-r--r--  vendor/gopkg.in/yaml.v2/scannerc.go | 2710
-rw-r--r--  vendor/gopkg.in/yaml.v2/sorter.go | 104
-rw-r--r--  vendor/gopkg.in/yaml.v2/writerc.go | 89
-rw-r--r--  vendor/gopkg.in/yaml.v2/yaml.go | 346
-rw-r--r--  vendor/gopkg.in/yaml.v2/yamlh.go | 716
-rw-r--r--  vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173
639 files changed, 132858 insertions, 0 deletions
diff --git a/vendor/github.com/apparentlymart/go-cidr/LICENSE b/vendor/github.com/apparentlymart/go-cidr/LICENSE
new file mode 100644
index 00000000..21253788
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-cidr/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2015 Martin Atkins
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
new file mode 100644
index 00000000..a31cdec7
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
@@ -0,0 +1,112 @@
+// Package cidr is a collection of assorted utilities for computing
+// network and host addresses within network ranges.
+//
+// It expects a CIDR-type address structure where addresses are divided into
+// some number of prefix bits representing the network and then the remaining
+// suffix bits represent the host.
+//
+// For example, it can help to calculate addresses for sub-networks of a
+// parent network, or to calculate host addresses within a particular prefix.
+//
+// At present this package is prioritizing simplicity of implementation and
+// de-prioritizing speed and memory usage. Thus caution is advised before
+// using this package in performance-critical applications or hot codepaths.
+// Patches to improve the speed and memory usage may be accepted as long as
+// they do not result in a significant increase in code complexity.
+package cidr
+
+import (
+	"fmt"
+	"math/big"
+	"net"
+)
+
+// Subnet takes a parent CIDR range and creates a subnet within it
+// with the given number of additional prefix bits and the given
+// network number.
+//
+// For example, 10.3.0.0/16, extended by 8 bits, with a network number
+// of 5, becomes 10.3.5.0/24 .
+func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) {
+	ip := base.IP
+	mask := base.Mask
+
+	parentLen, addrLen := mask.Size()
+	newPrefixLen := parentLen + newBits
+
+	if newPrefixLen > addrLen {
+		return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits)
+	}
+
+	maxNetNum := uint64(1<<uint64(newBits)) - 1
+	if uint64(num) > maxNetNum {
+		return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num)
+	}
+
+	return &net.IPNet{
+		IP:   insertNumIntoIP(ip, num, newPrefixLen),
+		Mask: net.CIDRMask(newPrefixLen, addrLen),
+	}, nil
+}
+
+// Host takes a parent CIDR range and turns it into a host IP address with
+// the given host number.
+//
+// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2.
+func Host(base *net.IPNet, num int) (net.IP, error) {
+	ip := base.IP
+	mask := base.Mask
+
+	parentLen, addrLen := mask.Size()
+	hostLen := addrLen - parentLen
+
+	maxHostNum := uint64(1<<uint64(hostLen)) - 1
+
+	numUint64 := uint64(num)
+	if num < 0 {
+		numUint64 = uint64(-num) - 1
+		num = int(maxHostNum - numUint64)
+	}
+
+	if numUint64 > maxHostNum {
+		return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num)
+	}
+
+	return insertNumIntoIP(ip, num, 32), nil
+}
+
+// AddressRange returns the first and last addresses in the given CIDR range.
+func AddressRange(network *net.IPNet) (net.IP, net.IP) {
+	// the first IP is easy
+	firstIP := network.IP
+
+	// the last IP is the network address OR NOT the mask address
+	prefixLen, bits := network.Mask.Size()
+	if prefixLen == bits {
+		// Easy!
+		// But make sure that our two slices are distinct, since they
+		// would be in all other cases.
+		lastIP := make([]byte, len(firstIP))
+		copy(lastIP, firstIP)
+		return firstIP, lastIP
+	}
+
+	firstIPInt, bits := ipToInt(firstIP)
+	hostLen := uint(bits) - uint(prefixLen)
+	lastIPInt := big.NewInt(1)
+	lastIPInt.Lsh(lastIPInt, hostLen)
+	lastIPInt.Sub(lastIPInt, big.NewInt(1))
+	lastIPInt.Or(lastIPInt, firstIPInt)
+
+	return firstIP, intToIP(lastIPInt, bits)
+}
+
+// AddressCount returns the number of distinct host addresses within the given
+// CIDR range.
+//
+// Since the result is a uint64, this function returns meaningful information
+// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65.
+func AddressCount(network *net.IPNet) uint64 {
+	prefixLen, bits := network.Mask.Size()
+	return 1 << (uint64(bits) - uint64(prefixLen))
+}
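
Taken together, the four exported functions above cover the basic subnet math. A minimal usage sketch of the vendored package (illustrative only, built from the doc-comment examples above; not part of this commit):

package main

import (
	"fmt"
	"net"

	"github.com/apparentlymart/go-cidr/cidr"
)

func main() {
	_, base, _ := net.ParseCIDR("10.3.0.0/16")

	// Extend the /16 prefix by 8 bits and pick network number 5: 10.3.5.0/24.
	sub, err := cidr.Subnet(base, 8, 5)
	if err != nil {
		panic(err)
	}

	// Host number 2 within the new /24: 10.3.5.2.
	host, err := cidr.Host(sub, 2)
	if err != nil {
		panic(err)
	}

	first, last := cidr.AddressRange(sub)
	fmt.Println(sub, host, first, last, cidr.AddressCount(sub))
	// Output: 10.3.5.0/24 10.3.5.2 10.3.5.0 10.3.5.255 256
}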
diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go
new file mode 100644
index 00000000..861a5f62
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go
@@ -0,0 +1,38 @@
+package cidr
+
+import (
+	"fmt"
+	"math/big"
+	"net"
+)
+
+func ipToInt(ip net.IP) (*big.Int, int) {
+	val := &big.Int{}
+	val.SetBytes([]byte(ip))
+	if len(ip) == net.IPv4len {
+		return val, 32
+	} else if len(ip) == net.IPv6len {
+		return val, 128
+	} else {
+		panic(fmt.Errorf("Unsupported address length %d", len(ip)))
+	}
+}
+
+func intToIP(ipInt *big.Int, bits int) net.IP {
+	ipBytes := ipInt.Bytes()
+	ret := make([]byte, bits/8)
+	// Pack our IP bytes into the end of the return array,
+	// since big.Int.Bytes() removes front zero padding.
+	for i := 1; i <= len(ipBytes); i++ {
+		ret[len(ret)-i] = ipBytes[len(ipBytes)-i]
+	}
+	return net.IP(ret)
+}
+
+func insertNumIntoIP(ip net.IP, num int, prefixLen int) net.IP {
+	ipInt, totalBits := ipToInt(ip)
+	bigNum := big.NewInt(int64(num))
+	bigNum.Lsh(bigNum, uint(totalBits-prefixLen))
+	ipInt.Or(ipInt, bigNum)
+	return intToIP(ipInt, totalBits)
+}
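
The three helpers above treat an address as one big-endian integer. The same computation written inline with math/big shows what insertNumIntoIP does for the 10.3.0.0/16 example from cidr.go; this sketch substitutes big.Int.FillBytes (Go 1.15+) for the manual byte-packing loop in intToIP:

package main

import (
	"fmt"
	"math/big"
	"net"
)

func main() {
	ip := net.ParseIP("10.3.0.0").To4() // 4 bytes, so totalBits = 32

	// ipToInt: interpret the raw address bytes as one unsigned integer.
	ipInt := new(big.Int).SetBytes(ip)

	// Shift network number 5 into the bits below the new prefix length of 24
	// (totalBits - prefixLen = 8 bits of shift), then OR it into the address.
	num := big.NewInt(5)
	num.Lsh(num, 32-24)
	ipInt.Or(ipInt, num)

	// intToIP: repack into 4 bytes, right-aligned, since big.Int.Bytes()
	// would drop leading zero bytes.
	out := make(net.IP, 4)
	ipInt.FillBytes(out)
	fmt.Println(out) // 10.3.5.0
}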
diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
new file mode 100644
index 00000000..5f14d116
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/aws/aws-sdk-go/README.md b/vendor/github.com/aws/aws-sdk-go/README.md
new file mode 100644
index 00000000..9c871847
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/README.md
@@ -0,0 +1,449 @@
+[![API Reference](http://img.shields.io/badge/api-reference-blue.svg)](http://docs.aws.amazon.com/sdk-for-go/api) [![Join the chat at https://gitter.im/aws/aws-sdk-go](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/aws/aws-sdk-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Build Status](https://img.shields.io/travis/aws/aws-sdk-go.svg)](https://travis-ci.org/aws/aws-sdk-go) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
+
+# AWS SDK for Go
+
+aws-sdk-go is the official AWS SDK for the Go programming language.
+
+Check out our [release notes](https://github.com/aws/aws-sdk-go/releases) for information about the latest bug fixes, updates, and features added to the SDK.
+
+## Installing
+
+If you are using Go 1.5 with the `GO15VENDOREXPERIMENT=1` vendoring flag, or Go 1.6 and higher, you can use the following command to retrieve the SDK. The SDK's non-testing dependencies will be included and are vendored in the `vendor` folder.
+
+ go get -u github.com/aws/aws-sdk-go
+
+Otherwise, if your Go environment does not have vendoring support enabled, or you do not want to include the SDK's vendored dependencies, you can use the following commands to retrieve the SDK and its non-testing dependencies using `go get`.
+
+ go get -u github.com/aws/aws-sdk-go/aws/...
+ go get -u github.com/aws/aws-sdk-go/service/...
+
+If you're looking to retrieve just the SDK without any dependencies, use the following command.
+
+ go get -d github.com/aws/aws-sdk-go/
+
+These commands will still include the `vendor` folder, which should be deleted if it is not going to be used by your environment.
+
+ rm -rf $GOPATH/src/github.com/aws/aws-sdk-go/vendor
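+
+As a quick sanity check that the SDK was retrieved correctly, a minimal program such as the following sketch can be compiled and run; it only prints the SDK's name and version constants from the aws package.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+func main() {
+ // SDKName and SDKVersion are constants defined by the SDK core.
+ fmt.Println(aws.SDKName, aws.SDKVersion)
+}
+```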
+
+## Getting Help
+
+Please use these community resources for getting help. We use GitHub issues for tracking bugs and feature requests.
+
+* Ask a question on [StackOverflow](http://stackoverflow.com/) and tag it with the [`aws-sdk-go`](http://stackoverflow.com/questions/tagged/aws-sdk-go) tag.
+* Come join the AWS SDK for Go community chat on [gitter](https://gitter.im/aws/aws-sdk-go).
+* Open a support ticket with [AWS Support](http://docs.aws.amazon.com/awssupport/latest/user/getting-started.html).
+* If you think you may have found a bug, please open an [issue](https://github.com/aws/aws-sdk-go/issues/new).
+
+## Opening Issues
+
+If you encounter a bug with the AWS SDK for Go, we would like to hear about it. Search the [existing issues](https://github.com/aws/aws-sdk-go/issues) and see if others are also experiencing the issue before opening a new issue. Please include the version of the AWS SDK for Go, the Go language, and the OS you’re using. Please also include a repro case when appropriate.
+
+The GitHub issues are intended for bug reports and feature requests. For help and questions about using the AWS SDK for Go, please make use of the resources listed in the [Getting Help](https://github.com/aws/aws-sdk-go#getting-help) section. Keeping the list of open issues lean will help us respond in a timely manner.
+
+## Reference Documentation
+
+[`Getting Started Guide`](https://aws.amazon.com/sdk-for-go/) - This document is a general introduction to configuring and making requests with the SDK. If this is your first time using the SDK, this documentation and the API documentation will help you get started. This document focuses on the syntax and behavior of the SDK. The [Service Developer Guide](https://aws.amazon.com/documentation/) will help you get started using specific AWS services.
+
+[`SDK API Reference Documentation`](https://docs.aws.amazon.com/sdk-for-go/api/) - Use this document to look up all API operation input and output parameters for AWS services supported by the SDK. The API reference also includes documentation of the SDK, and examples of how to use the SDK, service client API operations, and API operation required parameters.
+
+[`Service Developer Guide`](https://aws.amazon.com/documentation/) - Use this documentation to learn how to interface with an AWS service. These guides are useful both when you're getting started with a service and when you're looking for more information about one. You should not need this document for coding, though in some cases, services may supply helpful samples that you might want to look out for.
+
+[`SDK Examples`](https://github.com/aws/aws-sdk-go/tree/master/example) - Included in the SDK's repo are several hand-crafted examples using SDK features and AWS services.
+
+## Overview of SDK's Packages
+
+The SDK is composed of two main components, SDK core, and service clients.
+The SDK core packages are all available under the aws package at the root of
+the SDK. Each client for a supported AWS service is available within its own
+package under the service folder at the root of the SDK.
+
+ * aws - SDK core, provides common shared types such as Config, Logger,
+ and utilities to make working with API parameters easier.
+
+ * awserr - Provides the error interface that the SDK will use for all
+ errors that occur in the SDK's processing. This includes service API
+ response errors as well. The Error type is made up of a code and message.
+ Cast the SDK's returned error type to awserr.Error and call the Code
+ method to compare the returned error to specific error codes. See the package's
+ documentation for additional values that can be extracted such as RequestID.
+
+ * credentials - Provides the types and built in credentials providers
+ the SDK will use to retrieve AWS credentials to make API requests with.
+ Nested under this folder are also additional credentials providers such as
+ stscreds for assuming IAM roles, and ec2rolecreds for EC2 Instance roles.
+
+ * endpoints - Provides the AWS Regions and Endpoints metadata for the SDK.
+ Use this to look up AWS service endpoint information such as which services
+ are in a region, and what regions a service is in. Constants are also provided
+ for all region identifiers, e.g. UsWest2RegionID for "us-west-2".
+
+ * session - Provides initial default configuration, and loads
+ configuration from external sources such as the environment and shared
+ credentials file.
+
+ * request - Provides the API request sending and retry logic for the SDK.
+ This package also includes utilities for defining your own request
+ retryer, and configuring how the SDK processes the request.
+
+ * service - Clients for AWS services. All services supported by the SDK are
+ available under this folder.
+
+## How to Use the SDK's AWS Service Clients
+
+The SDK includes the Go types and utilities you can use to make requests to
+AWS service APIs. Within the service folder at the root of the SDK you'll find
+a package for each AWS service the SDK supports. All service clients follow
+a common pattern of creation and usage.
+
+When creating a client for an AWS service you'll first need to construct a
+Session value. The Session provides configuration that is shared between your
+service clients. When service clients are created you can pass in additional
+configuration via the aws.Config type to override the configuration provided
+by the Session, creating service client instances with custom configuration.
+
+Once the service's client is created you can use it to make API requests to
+the AWS service. These clients are safe to use concurrently.
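+
+A minimal sketch of this pattern (the region value here is just an illustration):
+
+```go
+sess := session.Must(session.NewSession())
+
+// Pass an optional aws.Config to override the Session's configuration for
+// this client only.
+svc := s3.New(sess, &aws.Config{
+ Region: aws.String("us-west-2"),
+})
+```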
+
+## Configuring the SDK
+
+In the AWS SDK for Go, you can configure settings for service clients, such
+as the log level and maximum number of retries. Most settings are optional;
+however, for each service client, you must specify a region and your credentials.
+The SDK uses these values to send requests to the correct AWS region and sign
+requests with the correct credentials. You can specify these values as part
+of a session or as environment variables.
+
+See the SDK's [configuration guide][config_guide] for more information.
+
+See the [session][session_pkg] package documentation for more information on how to use Session
+with the SDK.
+
+See the [Config][config_typ] type in the [aws][aws_pkg] package for more information on configuration
+options.
+
+[config_guide]: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html
+[session_pkg]: https://docs.aws.amazon.com/sdk-for-go/api/aws/session/
+[config_typ]: https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+[aws_pkg]: https://docs.aws.amazon.com/sdk-for-go/api/aws/
+
+### Configuring Credentials
+
+When using the SDK you'll generally need your AWS credentials to authenticate
+with AWS services. The SDK supports multiple methods of providing these
+credentials. By default the SDK will source credentials automatically from
+its default credential chain. See the session package for more information
+on this chain, and how to configure it. The common items in the credential
+chain are the following:
+
+ * Environment Credentials - Set of environment variables that are useful
+ when subprocesses are created for specific roles.
+
+ * Shared Credentials file (~/.aws/credentials) - This file stores your
+ credentials based on a profile name and is useful for local development.
+
+ * EC2 Instance Role Credentials - Use EC2 Instance Role to assign credentials
+ to applications running on an EC2 instance. This removes the need to manage
+ credential files in production.
+
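+For example, the environment credentials can be supplied as follows (the key
+values below are placeholders):
+
+ export AWS_ACCESS_KEY_ID=YOUR_AKID
+ export AWS_SECRET_ACCESS_KEY=YOUR_SECRET_KEY
+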
+Credentials can be configured in code as well by setting the Config's Credentials
+value to a custom provider or using one of the providers included with the
+SDK to bypass the default credential chain and use a custom one. This is
+helpful when you want to instruct the SDK to only use a specific set of
+credentials or providers.
+
+This example creates a credential provider for assuming an IAM role, "myRoleARN",
+and configures the S3 service client to use that role for API requests.
+
+```go
+ // Initial credentials loaded from SDK's default credential chain. Such as
+ // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+ // Role. These credentials will be used to make the STS Assume Role API call.
+ sess := session.Must(session.NewSession())
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN.
+ creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+```
+
+See the [credentials][credentials_pkg] package documentation for more information on credential
+providers included with the SDK, and how to customize the SDK's usage of
+credentials.
+
+The SDK has support for the shared configuration file (~/.aws/config). This
+support can be enabled by setting the environment variable "AWS_SDK_LOAD_CONFIG=1",
+or by enabling the feature in code when creating a Session via the
+session.Options SharedConfigState parameter.
+
+```go
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ SharedConfigState: session.SharedConfigEnable,
+ }))
+```
+
+[credentials_pkg]: https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials
+
+### Configuring AWS Region
+
+In addition to the credentials, you'll need to specify the region the SDK
+will use when making AWS API requests. In the SDK you can specify the region
+either with an environment variable, or directly in code when a Session or
+service client is created. The last value specified in code wins if the region
+is specified multiple ways.
+
+To set the region via the environment variable, set "AWS_REGION" to the
+region you want the SDK to use. Using this method to set the region will
+allow you to run your application in multiple regions without needing additional
+code in the application to select the region.
+
+ AWS_REGION=us-west-2
+
+The endpoints package includes constants for all regions the SDK knows. The
+values are all suffixed with RegionID. These values are helpful because they
+reduce the need to type the region string manually.
+
+To set the region on a Session, set the aws package's Config struct Region
+field to the AWS region you want the service clients created from the session to
+use. This is helpful when you want to create multiple service clients, and
+all of the clients make API requests to the same region.
+
+```go
+ sess := session.Must(session.NewSession(&aws.Config{
+ Region: aws.String(endpoints.UsWest2RegionID),
+ }))
+```
+
+See the [endpoints][endpoints_pkg] package for the AWS Regions and Endpoints metadata.
+
+In addition to setting the region when creating a Session you can also set
+the region on a per-service-client basis. This overrides the region of a
+Session. This is helpful when you want to create service clients in specific
+regions different from the Session's region.
+
+```go
+ svc := s3.New(sess, &aws.Config{
+ Region: aws.String(endpoints.UsWest2RegionID),
+ })
+```
+
+See the [Config][config_typ] type in the [aws][aws_pkg] package for more information and additional
+options such as setting the Endpoint, and other service client configuration options.
+
+[endpoints_pkg]: https://docs.aws.amazon.com/sdk-for-go/api/aws/endpoints/
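+
+For example, the Endpoint option can point a client at an S3-compatible server.
+A sketch (the localhost address is an assumption for a local test server):
+
+```go
+svc := s3.New(sess, &aws.Config{
+ Endpoint: aws.String("http://localhost:9000"),
+ S3ForcePathStyle: aws.Bool(true),
+})
+```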
+
+## Making API Requests
+
+Once the client is created you can make an API request to the service.
+Each API method takes an input parameter, and returns the service response
+and an error. The SDK provides methods for making the API call in multiple ways.
+
+In this list we'll use the S3 ListObjects API as an example for the different
+ways of making API requests.
+
+ * ListObjects - Base API operation that will make the API request to the service.
+
+ * ListObjectsRequest - API methods suffixed with Request will construct the
+ API request, but not send it. This is also helpful when you want to get a
+ presigned URL for a request, and share the presigned URL instead of your
+ application making the request directly (see the sketch after this list).
+
+ * ListObjectsPages - Same as the base API operation, but uses a callback to
+ automatically handle pagination of the API's response.
+
+ * ListObjectsWithContext - Same as the base API operation, but adds support
+ for the Context pattern. This is helpful for controlling the cancellation of
+ in-flight requests. See the Go standard library context package for more
+ information. This method also takes the request package's Option functional
+ options as the variadic argument for modifying how the request will be
+ made, or extracting information from the raw HTTP response.
+
+ * ListObjectsPagesWithContext - Same as ListObjectsPages, but adds support for
+ the Context pattern. Similar to ListObjectsWithContext, this method also
+ takes the request package's Option functional options as the variadic
+ argument.
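+
+For example, ListObjectsRequest can be combined with the request's Presign
+method to produce a shareable URL; a sketch (the bucket name and duration are
+placeholders):
+
+```go
+req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{
+ Bucket: aws.String("my-bucket"),
+})
+
+// Presign returns a URL that can be used to make the request without AWS
+// credentials for the given duration.
+urlStr, err := req.Presign(15 * time.Minute)
+if err != nil {
+ fmt.Println("failed to presign request,", err)
+}
+fmt.Println("presigned URL:", urlStr)
+```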
+
+In addition to the API operations the SDK also includes several higher level
+methods that abstract checking for and waiting for an AWS resource to be in
+a desired state. In this list we'll use WaitUntilBucketExists to demonstrate
+the different forms of waiters.
+
+ * WaitUntilBucketExists - Method that makes an API request to query an AWS
+ service for a resource's state. Will return successfully when that state is
+ reached.
+
+ * WaitUntilBucketExistsWithContext - Same as WaitUntilBucketExists, but adds
+ support for the Context pattern. In addition, these methods take the request
+ package's WaiterOptions to configure the waiter, and how the underlying
+ request will be made by the SDK.
+
+The API method will document which error codes the service might return for
+the operation. These errors will also be available as const strings prefixed
+with "ErrCode" in the service client's package. If there are no errors listed
+in the API's SDK documentation, you'll need to consult the AWS service's API
+documentation for the errors that could be returned.
+
+```go
+ ctx := context.Background()
+
+ result, err := svc.GetObjectWithContext(ctx, &s3.GetObjectInput{
+ Bucket: aws.String("my-bucket"),
+ Key: aws.String("my-key"),
+ })
+ if err != nil {
+ // Cast err to awserr.Error to handle specific error codes.
+ aerr, ok := err.(awserr.Error)
+ if ok && aerr.Code() == s3.ErrCodeNoSuchKey {
+ // Specific error code handling
+ }
+ return err
+ }
+
+ // Make sure to close the body when done with it for S3 GetObject APIs, or
+ // the connection will leak.
+ defer result.Body.Close()
+
+ fmt.Println("Object Size:", aws.StringValue(result.ContentLength))
+```
+
+### API Request Pagination and Resource Waiters
+
+Pagination helper methods are suffixed with "Pages", and provide the
+functionality needed to round trip API page requests. Pagination methods
+take a callback function that will be called for each page of the API's response.
+
+```go
+ objects := []string{}
+ err := svc.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{
+ Bucket: aws.String(myBucket),
+ }, func(p *s3.ListObjectsOutput, lastPage bool) bool {
+ for _, o := range p.Contents {
+ objects = append(objects, aws.StringValue(o.Key))
+ }
+ return true // continue paging
+ })
+ if err != nil {
+ panic(fmt.Sprintf("failed to list objects for bucket, %s, %v", myBucket, err))
+ }
+
+ fmt.Println("Objects in bucket:", objects)
+```
+
+Waiter helper methods provide the functionality to wait for an AWS resource
+state. These methods abstract the logic needed to check the state of an
+AWS resource, and wait until that resource is in a desired state. The waiter
+will block until the resource is in the state that is desired, an error occurs,
+or the waiter times out. If the waiter times out, the error code returned will
+be request.WaiterResourceNotReadyErrorCode.
+
+```go
+ err := svc.WaitUntilBucketExistsWithContext(ctx, &s3.HeadBucketInput{
+ Bucket: aws.String(myBucket),
+ })
+ if err != nil {
+ aerr, ok := err.(awserr.Error)
+ if ok && aerr.Code() == request.WaiterResourceNotReadyErrorCode {
+ fmt.Fprintln(os.Stderr, "timed out while waiting for bucket to exist")
+ }
+ panic(fmt.Errorf("failed to wait for bucket to exist, %v", err))
+ }
+ fmt.Println("Bucket", myBucket, "exists")
+```
+
+## Complete SDK Example
+
+This example shows a complete working Go file which will upload a file to S3
+and use the Context pattern to implement timeout logic that will cancel the
+request if it takes too long. This example highlights how to use sessions,
+create a service client, make a request, handle the error, and process the
+response.
+
+```go
+ package main
+
+ import (
+ "context"
+ "flag"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+ )
+
+ // Uploads a file to S3 given a bucket and object key. Also takes a duration
+ // value to terminate the upload if it doesn't complete within that time.
+ //
+ // The AWS Region needs to be provided in the AWS shared config or via the
+ // environment variable `AWS_REGION`. Credentials must also be provided; they
+ // will default to the shared credentials file, but can be loaded from the
+ // environment if provided.
+ //
+ // Usage:
+ // # Upload myfile.txt to myBucket/myKey. Must complete within 10 minutes or will fail
+ // go run withContext.go -b mybucket -k myKey -d 10m < myfile.txt
+ func main() {
+ var bucket, key string
+ var timeout time.Duration
+
+ flag.StringVar(&bucket, "b", "", "Bucket name.")
+ flag.StringVar(&key, "k", "", "Object key name.")
+ flag.DurationVar(&timeout, "d", 0, "Upload timeout.")
+ flag.Parse()
+
+ // All clients require a Session. The Session provides the client with
+ // shared configuration such as region, endpoint, and credentials. A
+ // Session should be shared where possible to take advantage of
+ // configuration and credential caching. See the session package for
+ // more information.
+ sess := session.Must(session.NewSession())
+
+ // Create a new instance of the service's client with a Session.
+ // Optional aws.Config values can also be provided as variadic arguments
+ // to the New function. This option allows you to provide service
+ // specific configuration.
+ svc := s3.New(sess)
+
+ // Create a context with a timeout that will abort the upload if it takes
+ // more than the passed in timeout.
+ ctx := context.Background()
+ var cancelFn func()
+ if timeout > 0 {
+ ctx, cancelFn = context.WithTimeout(ctx, timeout)
+ }
+ // Ensure the context is canceled to prevent leaking.
+ // See context package for more information, https://golang.org/pkg/context/
+ if cancelFn != nil {
+ defer cancelFn()
+ }
+
+ // Uploads the object to S3. The Context will interrupt the request if the
+ // timeout expires.
+ _, err := svc.PutObjectWithContext(ctx, &s3.PutObjectInput{
+ Bucket: aws.String(bucket),
+ Key: aws.String(key),
+ Body: os.Stdin,
+ })
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok && aerr.Code() == request.CanceledErrorCode {
+ // If the SDK can determine the request or retry delay was canceled
+ // by a context the CanceledErrorCode error code will be returned.
+ fmt.Fprintf(os.Stderr, "upload canceled due to timeout, %v\n", err)
+ } else {
+ fmt.Fprintf(os.Stderr, "failed to upload object, %v\n", err)
+ }
+ os.Exit(1)
+ }
+
+ fmt.Printf("successfully uploaded file to %s/%s\n", bucket, key)
+ }
+```
+
+## License
+
+This SDK is distributed under the
+[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0),
+see LICENSE.txt and NOTICE.txt for more information.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
new file mode 100644
index 00000000..56fdfc2b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
@@ -0,0 +1,145 @@
+// Package awserr provides the API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+// output, err := s3manager.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Get error details
+// log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+// // Prints out full error message, including original error if there was one.
+// log.Println("Error:", awsErr.Error())
+//
+// // Get original error
+// if origErr := awsErr.OrigErr(); origErr != nil {
+// // operate on original error.
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type Error interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards
+// compatibility.
+type BatchError interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErrs() []error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+ // Satisfy the base Error interface.
+ Error
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr satisfies the Error interface it will not be wrapped within a new
+// Error object and will instead be returned.
+func New(code, message string, origErr error) Error {
+ var errs []error
+ if origErr != nil {
+ errs = append(errs, origErr)
+ }
+ return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns a BatchedErrors with a collection of errors as an
+// array of errors.
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+ return newBaseError(code, message, errs)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// prior to reaching the service, such as when a connection error occurred.
+//
+// Example:
+//
+// output, err := s3manager.Upload(svc, input, opts)
+// if err != nil {
+// if reqerr, ok := err.(RequestFailure); ok {
+// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+// } else {
+// log.Println("Error:", err.Error())
+// }
+// }
+//
+// Combined with awserr.Error:
+//
+// output, err := s3manager.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Generic AWS Error with Code, Message, and original error (if any)
+// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+// if reqErr, ok := err.(awserr.RequestFailure); ok {
+// // A service error occurred
+// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type RequestFailure interface {
+ Error
+
+ // The status code of the HTTP response.
+ StatusCode() int
+
+ // The request ID returned by the service for a request failure. This will
+ // be empty if no request ID is available such as the request failed due
+ // to a connection error.
+ RequestID() string
+}
+
+// NewRequestFailure returns a new request error wrapper for the given Error
+// provided.
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
+ return newRequestError(err, statusCode, reqID)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
new file mode 100644
index 00000000..0202a008
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
@@ -0,0 +1,194 @@
+package awserr
+
+import "fmt"
+
+// SprintError returns a string of the formatted error code.
+//
+// Both extra and origErr are optional. If provided, their lines will be
+// added; if not, they will be omitted.
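+//
+// For example (an illustrative sketch):
+//
+// SprintError("InvalidParameter", "invalid value", "", errors.New("boom"))
+//
+// produces:
+//
+// InvalidParameter: invalid value
+// caused by: boom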
+func SprintError(code, message, extra string, origErr error) string {
+ msg := fmt.Sprintf("%s: %s", code, message)
+ if extra != "" {
+ msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+ }
+ if origErr != nil {
+ msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+ }
+ return msg
+}
+
+// A baseError wraps the code and message which defines an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error interface,
+// and for any error which does not fit into a specific error wrapper type.
+type baseError struct {
+ // Classification of error
+ code string
+
+ // Detailed information about error
+ message string
+
+ // Optional original error this error is based off of. Allows building
+ // chained errors.
+ errs []error
+}
+
+// newBaseError returns an error object for the code, message, and errors.
+//
+// code is a short phrase containing no whitespace, depicting the
+// classification of the error that is being created.
+//
+// message is the free flow string containing detailed information about the
+// error.
+//
+// origErrs are the error objects which will be nested under the new error to
+// be returned.
+func newBaseError(code, message string, origErrs []error) *baseError {
+ b := &baseError{
+ code: code,
+ message: message,
+ errs: origErrs,
+ }
+
+ return b
+}
+
+// Error returns the string representation of the error.
+//
+// See ErrorWithExtra for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+ size := len(b.errs)
+ if size > 0 {
+ return SprintError(b.code, b.message, "", errorList(b.errs))
+ }
+
+ return SprintError(b.code, b.message, "", nil)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+ return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+ return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+ return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no
+// error was set. This only returns the first element in the list. If the full
+// list is needed, use BatchedErrors.
+func (b baseError) OrigErr() error {
+ switch len(b.errs) {
+ case 0:
+ return nil
+ case 1:
+ return b.errs[0]
+ default:
+ if err, ok := b.errs[0].(Error); ok {
+ return NewBatchError(err.Code(), err.Message(), b.errs[1:])
+ }
+ return NewBatchError("BatchedErrors",
+ "multiple errors occurred", b.errs)
+ }
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (b baseError) OrigErrs() []error {
+ return b.errs
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+ awsError
+ statusCode int
+ requestID string
+}
+
+// newRequestError returns a wrapped error with additional information for
+// request status code, and service requestID.
+//
+// Should be used to wrap all errors which involve service requests, even if
+// the request failed without a service response but had an HTTP status code
+// that may be meaningful.
+//
+// Also wraps original errors via the baseError.
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+ return &requestError{
+ awsError: err,
+ statusCode: statusCode,
+ requestID: requestID,
+ }
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r requestError) Error() string {
+ extra := fmt.Sprintf("status code: %d, request id: %s",
+ r.statusCode, r.requestID)
+ return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+ return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error
+func (r requestError) StatusCode() int {
+ return r.statusCode
+}
+
+// RequestID returns the wrapped requestID
+func (r requestError) RequestID() string {
+ return r.requestID
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (r requestError) OrigErrs() []error {
+ if b, ok := r.awsError.(BatchedErrors); ok {
+ return b.OrigErrs()
+ }
+ return []error{r.OrigErr()}
+}
+
+// An error list that satisfies the golang interface
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+ msg := ""
+ // If the list is empty the returned message will be empty as well.
+ if size := len(e); size > 0 {
+ for i := 0; i < size; i++ {
+ msg += e[i].Error()
+ // We check the next index to see if it is within the slice. If it is,
+ // we append a newline; a trailing '\n' would break unit tests.
+ if i+1 < size {
+ msg += "\n"
+ }
+ }
+ }
+ return msg
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
new file mode 100644
index 00000000..1a3d106d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,108 @@
+package awsutil
+
+import (
+ "io"
+ "reflect"
+ "time"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different types, but will only copy fields which
+// are assignable and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs, are ignored.
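+//
+// For example (an illustrative sketch; the widget type is hypothetical):
+//
+// type widget struct{ Name string }
+// src := widget{Name: "a"}
+// var dst widget
+// awsutil.Copy(&dst, &src)
+// // dst.Name is now "a"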
+func Copy(dst, src interface{}) {
+ dstval := reflect.ValueOf(dst)
+ if !dstval.IsValid() {
+ panic("Copy dst cannot be nil")
+ }
+
+ rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+ dsti := reflect.New(reflect.TypeOf(src).Elem())
+ dst = dsti.Interface()
+ rcopy(dsti, reflect.ValueOf(src), true)
+ return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
+func rcopy(dst, src reflect.Value, root bool) {
+ if !src.IsValid() {
+ return
+ }
+
+ switch src.Kind() {
+ case reflect.Ptr:
+ if _, ok := src.Interface().(io.Reader); ok {
+ if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+ dst.Elem().Set(src)
+ } else if dst.CanSet() {
+ dst.Set(src)
+ }
+ } else {
+ e := src.Type().Elem()
+ if dst.CanSet() && !src.IsNil() {
+ if _, ok := src.Interface().(*time.Time); !ok {
+ dst.Set(reflect.New(e))
+ } else {
+ tempValue := reflect.New(e)
+ tempValue.Elem().Set(src.Elem())
+ // Sets time.Time's unexported values
+ dst.Set(tempValue)
+ }
+ }
+ if src.Elem().IsValid() {
+ // Keep the current root state since the depth hasn't changed
+ rcopy(dst.Elem(), src.Elem(), root)
+ }
+ }
+ case reflect.Struct:
+ t := dst.Type()
+ for i := 0; i < t.NumField(); i++ {
+ name := t.Field(i).Name
+ srcVal := src.FieldByName(name)
+ dstVal := dst.FieldByName(name)
+ if srcVal.IsValid() && dstVal.CanSet() {
+ rcopy(dstVal, srcVal, false)
+ }
+ }
+ case reflect.Slice:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+ dst.Set(s)
+ for i := 0; i < src.Len(); i++ {
+ rcopy(dst.Index(i), src.Index(i), false)
+ }
+ case reflect.Map:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeMap(src.Type())
+ dst.Set(s)
+ for _, k := range src.MapKeys() {
+ v := src.MapIndex(k)
+ v2 := reflect.New(v.Type()).Elem()
+ rcopy(v2, v, false)
+ dst.SetMapIndex(k, v2)
+ }
+ default:
+ // Assign the value if possible. If it's not assignable, the value would
+ // need to be converted, and the impact of that may be unexpected or
+ // incompatible with the dst type.
+ if src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
new file mode 100644
index 00000000..59fa4a55
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
@@ -0,0 +1,27 @@
+package awsutil
+
+import (
+ "reflect"
+)
+
+// DeepEqual reports whether the two values are deeply equal, like
+// reflect.DeepEqual. In addition, this method will dereference the input
+// values if possible, so the comparison will not fail if one parameter is a
+// pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
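+//
+// For example (an illustrative sketch):
+//
+// s := "abc"
+// awsutil.DeepEqual(&s, "abc") // true, the pointer is dereferenced
+// awsutil.DeepEqual(&s, "xyz") // false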
+func DeepEqual(a, b interface{}) bool {
+ ra := reflect.Indirect(reflect.ValueOf(a))
+ rb := reflect.Indirect(reflect.ValueOf(b))
+
+ if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+ // If the elements are both nil and of the same type they are equal;
+ // if they are of different types they are not equal.
+ return reflect.TypeOf(a) == reflect.TypeOf(b)
+ } else if raValid != rbValid {
+ // Both values must be valid to be equal
+ return false
+ }
+
+ return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
new file mode 100644
index 00000000..11c52c38
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -0,0 +1,222 @@
+package awsutil
+
+import (
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
+ pathparts := strings.Split(path, "||")
+ if len(pathparts) > 1 {
+ for _, pathpart := range pathparts {
+ vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
+ if len(vals) > 0 {
+ return vals
+ }
+ }
+ return nil
+ }
+
+ values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
+ components := strings.Split(path, ".")
+ for len(values) > 0 && len(components) > 0 {
+ var index *int64
+ var indexStar bool
+ c := strings.TrimSpace(components[0])
+ if c == "" { // no actual component, illegal syntax
+ return nil
+ } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
+ // TODO normalize case for user
+ return nil // don't support unexported fields
+ }
+
+ // parse this component
+ if m := indexRe.FindStringSubmatch(c); m != nil {
+ c = m[1]
+ if m[2] == "" {
+ index = nil
+ indexStar = true
+ } else {
+ i, _ := strconv.ParseInt(m[2], 10, 32)
+ index = &i
+ indexStar = false
+ }
+ }
+
+ nextvals := []reflect.Value{}
+ for _, value := range values {
+ // pull component name out of struct member
+ if value.Kind() != reflect.Struct {
+ continue
+ }
+
+ if c == "*" { // pull all members
+ for i := 0; i < value.NumField(); i++ {
+ if f := reflect.Indirect(value.Field(i)); f.IsValid() {
+ nextvals = append(nextvals, f)
+ }
+ }
+ continue
+ }
+
+ value = value.FieldByNameFunc(func(name string) bool {
+ if c == name {
+ return true
+ } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
+ return true
+ }
+ return false
+ })
+
+ if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
+ if !value.IsNil() {
+ value.Set(reflect.Zero(value.Type()))
+ }
+ return []reflect.Value{value}
+ }
+
+ if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
+ // TODO if the value is the terminus it should not be created
+ // if the value to be set to its position is nil.
+ value.Set(reflect.New(value.Type().Elem()))
+ value = value.Elem()
+ } else {
+ value = reflect.Indirect(value)
+ }
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+
+ if indexStar || index != nil {
+ nextvals = []reflect.Value{}
+ for _, valItem := range values {
+ value := reflect.Indirect(valItem)
+ if value.Kind() != reflect.Slice {
+ continue
+ }
+
+ if indexStar { // grab all indices
+ for i := 0; i < value.Len(); i++ {
+ idx := reflect.Indirect(value.Index(i))
+ if idx.IsValid() {
+ nextvals = append(nextvals, idx)
+ }
+ }
+ continue
+ }
+
+ // pull out index
+ i := int(*index)
+ if i >= value.Len() { // check out of bounds
+ if createPath {
+ // TODO resize slice
+ } else {
+ continue
+ }
+ } else if i < 0 { // support negative indexing
+ i = value.Len() + i
+ }
+ value = reflect.Indirect(value.Index(i))
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+ }
+
+ components = components[1:]
+ }
+ return values
+}
+
+// ValuesAtPath returns a list of values at the case insensitive lexical
+// path inside of a structure.
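+//
+// For example (an illustrative sketch; resp is a hypothetical
+// *s3.ListObjectsOutput value):
+//
+// keys, err := awsutil.ValuesAtPath(resp, "Contents[].Key")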
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
+ result, err := jmespath.Search(path, i)
+ if err != nil {
+ return nil, err
+ }
+
+ v := reflect.ValueOf(result)
+ if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
+ return nil, nil
+ }
+ if s, ok := result.([]interface{}); ok {
+ return s, err
+ }
+ if v.Kind() == reflect.Map && v.Len() == 0 {
+ return nil, nil
+ }
+ if v.Kind() == reflect.Slice {
+ out := make([]interface{}, v.Len())
+ for i := 0; i < v.Len(); i++ {
+ out[i] = v.Index(i).Interface()
+ }
+ return out, nil
+ }
+
+ return []interface{}{result}, nil
+}
+
+// SetValueAtPath sets a value at the case insensitive lexical path inside
+// of a structure.
+func SetValueAtPath(i interface{}, path string, v interface{}) {
+ if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
+ for _, rval := range rvals {
+ if rval.Kind() == reflect.Ptr && rval.IsNil() {
+ continue
+ }
+ setValue(rval, v)
+ }
+ }
+}
+
+func setValue(dstVal reflect.Value, src interface{}) {
+ if dstVal.Kind() == reflect.Ptr {
+ dstVal = reflect.Indirect(dstVal)
+ }
+ srcVal := reflect.ValueOf(src)
+
+ if !srcVal.IsValid() { // src is literal nil
+ if dstVal.CanAddr() {
+ // Convert to pointer so that pointer's value can be nil'ed
+ // dstVal = dstVal.Addr()
+ }
+ dstVal.Set(reflect.Zero(dstVal.Type()))
+
+ } else if srcVal.Kind() == reflect.Ptr {
+ if srcVal.IsNil() {
+ srcVal = reflect.Zero(dstVal.Type())
+ } else {
+ srcVal = reflect.ValueOf(src).Elem()
+ }
+ dstVal.Set(srcVal)
+ } else {
+ dstVal.Set(srcVal)
+ }
+
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
new file mode 100644
index 00000000..710eb432
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
@@ -0,0 +1,113 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+)
+
+// Prettify returns the string representation of a value.
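+//
+// For example (an illustrative sketch), printing an API response value:
+//
+// fmt.Println(awsutil.Prettify(resp))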
+func Prettify(i interface{}) string {
+ var buf bytes.Buffer
+ prettify(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+// prettify will recursively walk value v to build a textual
+// representation of the value.
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ strtype := v.Type().String()
+ if strtype == "time.Time" {
+ fmt.Fprintf(buf, "%s", v.Interface())
+ break
+ } else if strings.HasPrefix(strtype, "io.") {
+ buf.WriteString("<buffer>")
+ break
+ }
+
+ buf.WriteString("{\n")
+
+ names := []string{}
+ for i := 0; i < v.Type().NumField(); i++ {
+ name := v.Type().Field(i).Name
+ f := v.Field(i)
+ if name[0:1] == strings.ToLower(name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+ continue // ignore unset fields
+ }
+ names = append(names, name)
+ }
+
+ for i, n := range names {
+ val := v.FieldByName(n)
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(n + ": ")
+ prettify(val, indent+2, buf)
+
+ if i < len(names)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ strtype := v.Type().String()
+ if strtype == "[]uint8" {
+ fmt.Fprintf(buf, "<binary> len %d", v.Len())
+ break
+ }
+
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ prettify(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ prettify(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ if !v.IsValid() {
+ fmt.Fprint(buf, "<invalid value>")
+ return
+ }
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ case io.ReadSeeker, io.Reader:
+ format = "buffer(%p)"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 00000000..b6432f1a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,89 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string {
+ var buf bytes.Buffer
+ stringValue(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ buf.WriteString("{\n")
+
+ names := []string{}
+ for i := 0; i < v.Type().NumField(); i++ {
+ name := v.Type().Field(i).Name
+ f := v.Field(i)
+ if name[0:1] == strings.ToLower(name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
+ continue // ignore unset fields
+ }
+ names = append(names, name)
+ }
+
+ for i, n := range names {
+ val := v.FieldByName(n)
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(n + ": ")
+ stringValue(val, indent+2, buf)
+
+ if i < len(names)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ stringValue(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ stringValue(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100644
index 00000000..48b0fbd9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -0,0 +1,149 @@
+package client
+
+import (
+ "fmt"
+ "net/http/httputil"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+ Config *aws.Config
+ Handlers request.Handlers
+ Endpoint string
+ SigningRegion string
+ SigningName string
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+ ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// ConfigNoResolveEndpointProvider is the same as ConfigProvider, except it
+// will not resolve the endpoint automatically. The service client's endpoint
+// must be provided via the aws.Config.Endpoint field.
+type ConfigNoResolveEndpointProvider interface {
+ ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct {
+ request.Retryer
+ metadata.ClientInfo
+
+ Config aws.Config
+ Handlers request.Handlers
+}
+
+// New will return a pointer to a new initialized service client.
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+ svc := &Client{
+ Config: cfg,
+ ClientInfo: info,
+ Handlers: handlers.Copy(),
+ }
+
+ switch retryer, ok := cfg.Retryer.(request.Retryer); {
+ case ok:
+ svc.Retryer = retryer
+ case cfg.Retryer != nil && cfg.Logger != nil:
+ s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+ cfg.Logger.Log(s)
+ fallthrough
+ default:
+ maxRetries := aws.IntValue(cfg.MaxRetries)
+ if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+ maxRetries = 3
+ }
+ svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+ }
+
+ svc.AddDebugHandlers()
+
+ for _, option := range options {
+ option(svc)
+ }
+
+ return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+ return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+ if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
+ return
+ }
+
+ c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest})
+ c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse})
+}
+
+const logReqMsg = `DEBUG: Request %s/%s Details:
+---[ REQUEST POST-SIGN ]-----------------------------
+%s
+-----------------------------------------------------`
+
+const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
+---[ REQUEST DUMP ERROR ]-----------------------------
+%s
+-----------------------------------------------------`
+
+func logRequest(r *request.Request) {
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
+ r.Error = awserr.New(request.ErrCodeRead, "an error occurred during request body reading", err)
+ return
+ }
+
+ if logBody {
+ // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
+ // Body as a NoOpCloser, which would not be reset after being read by the
+ // HTTP client reader.
+ r.ResetBody()
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
+}
+
+const logRespMsg = `DEBUG: Response %s/%s Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
+
+const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
+---[ RESPONSE DUMP ERROR ]-----------------------------
+%s
+-----------------------------------------------------`
+
+func logResponse(r *request.Request) {
+ var msg = "no response data"
+ if r.HTTPResponse != nil {
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ dumpedBody, err := httputil.DumpResponse(r.HTTPResponse, logBody)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
+ r.Error = awserr.New(request.ErrCodeRead, "an error occurred during response body reading", err)
+ return
+ }
+
+ msg = string(dumpedBody)
+ } else if r.Error != nil {
+ msg = r.Error.Error()
+ }
+ r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg))
+}
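To make the retryer-selection logic in New above concrete, here is a minimal usage sketch. It relies only on the vendored aws, client/metadata, and request packages from this change; the service name is illustrative, and because the config carries no custom Retryer, New falls through to DefaultRetryer.

    // cfg has no Retryer, so client.New takes the default case and
    // installs DefaultRetryer{NumMaxRetries: 3}.
    cfg := *aws.NewConfig().WithMaxRetries(3)
    info := metadata.ClientInfo{ServiceName: "example"}
    svc := client.New(cfg, info, request.Handlers{})
    _ = svc // svc.Retryer is now DefaultRetryer{NumMaxRetries: 3}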
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100644
index 00000000..1313478f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,96 @@
+package client
+
+import (
+ "math/rand"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, implement the
+// request.Retryer interface or create a structure type that composes this
+// struct and overrides the specific methods. For example, to override only
+// the MaxRetries method:
+//
+// type retryer struct {
+// service.DefaultRetryer
+// }
+//
+// // This implementation always has 100 max retries
+// func (d retryer) MaxRetries() int { return 100 }
+type DefaultRetryer struct {
+ NumMaxRetries int
+}
+
+// MaxRetries returns the maximum number of retries the service will use to
+// make an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+ return d.NumMaxRetries
+}
+
+var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+ // Set the upper limit of delay in retrying at ~five minutes
+ minTime := 30
+ throttle := d.shouldThrottle(r)
+ if throttle {
+ minTime = 500
+ }
+
+ retryCount := r.RetryCount
+ if retryCount > 13 {
+ retryCount = 13
+ } else if throttle && retryCount > 8 {
+ retryCount = 8
+ }
+
+ delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
+ return time.Duration(delay) * time.Millisecond
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if r.Retryable != nil {
+ return *r.Retryable
+ }
+
+ if r.HTTPResponse.StatusCode >= 500 {
+ return true
+ }
+ return r.IsErrorRetryable() || d.shouldThrottle(r)
+}
+
+// shouldThrottle returns true if the request should be throttled.
+func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
+ if r.HTTPResponse.StatusCode == 502 ||
+ r.HTTPResponse.StatusCode == 503 ||
+ r.HTTPResponse.StatusCode == 504 {
+ return true
+ }
+ return r.IsErrorThrottle()
+}
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+ lk sync.Mutex
+ src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
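The composition pattern described in DefaultRetryer's doc comment looks like this when written out compilable; a sketch, assuming a package that imports the vendored client, request, and aws packages.

    // retryer reuses DefaultRetryer's RetryRules and ShouldRetry but
    // overrides the retry budget.
    type retryer struct {
        client.DefaultRetryer
    }

    // MaxRetries always allows up to 100 retries.
    func (d retryer) MaxRetries() int { return 100 }

    // Installed via the helper referenced in the aws.Config docs:
    //     cfg := request.WithRetryer(aws.NewConfig(), retryer{})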
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
new file mode 100644
index 00000000..4778056d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -0,0 +1,12 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+ ServiceName string
+ APIVersion string
+ Endpoint string
+ SigningName string
+ SigningRegion string
+ JSONVersion string
+ TargetPrefix string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100644
index 00000000..d1f31f1c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -0,0 +1,470 @@
+package aws
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+)
+
+// UseServiceDefaultRetries instructs the config to use the service's own
+// default number of retries. This is also the default behavior if
+// Config.MaxRetries is nil.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer
+// interface.
+type RequestRetryer interface{}
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the defaults.DefaultConfig structure.
+//
+// // Create Session with MaxRetry configuration to be shared by multiple
+// // service clients.
+// sess := session.Must(session.NewSession(&aws.Config{
+// MaxRetries: aws.Int(3),
+// }))
+//
+// // Create S3 service client with a specific Region.
+// svc := s3.New(sess, &aws.Config{
+// Region: aws.String("us-west-2"),
+// })
+type Config struct {
+ // Enables verbose error printing of all credential chain errors.
+ // Should be used when wanting to see all errors while attempting to
+ // retrieve credentials.
+ CredentialsChainVerboseErrors *bool
+
+ // The credentials object to use when signing requests. Defaults to a
+ // chain of credential providers to search for credentials in environment
+ // variables, shared credential file, and EC2 Instance Roles.
+ Credentials *credentials.Credentials
+
+ // An optional endpoint URL (hostname only or fully qualified URI)
+ // that overrides the default generated endpoint for a client. Set this
+ // to `""` to use the default generated endpoint.
+ //
+ // @note You must still provide a `Region` value when specifying an
+ // endpoint for a client.
+ Endpoint *string
+
+ // The resolver to use for looking up endpoints for AWS service clients
+ // to use based on region.
+ EndpointResolver endpoints.Resolver
+
+ // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
+ // ShouldRetry regardless of whether or not if request.Retryable is set.
+ // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
+ // is not set, then ShouldRetry will only be called if request.Retryable is nil.
+ // Proper handling of the request.Retryable field is important when setting this field.
+ EnforceShouldRetryCheck *bool
+
+ // The region to send requests to. This parameter is required and must
+ // be configured globally or on a per-client basis unless otherwise
+ // noted. A full list of regions is found in the "Regions and Endpoints"
+ // document.
+ //
+ // @see http://docs.aws.amazon.com/general/latest/gr/rande.html
+ // AWS Regions and Endpoints
+ Region *string
+
+ // Set this to `true` to disable SSL when sending requests. Defaults
+ // to `false`.
+ DisableSSL *bool
+
+ // The HTTP client to use when sending requests. Defaults to
+ // `http.DefaultClient`.
+ HTTPClient *http.Client
+
+ // An integer value representing the logging level. The default log level
+ // is zero (LogOff), which represents no logging. To enable logging set
+ // to a LogLevel Value.
+ LogLevel *LogLevelType
+
+ // The logger writer interface to write logging messages to. Defaults to
+ // standard out.
+ Logger Logger
+
+ // The maximum number of times that a request will be retried for failures.
+ // Defaults to -1, which defers the max retry setting to the service
+ // specific configuration.
+ MaxRetries *int
+
+ // Retryer guides how HTTP requests should be retried in case of
+ // recoverable failures.
+ //
+ // When nil or the value does not implement the request.Retryer interface,
+ // the request.DefaultRetryer will be used.
+ //
+ // When both Retryer and MaxRetries are non-nil, the former is used and
+ // the latter ignored.
+ //
+ // To set the Retryer field in a type-safe manner and with chaining, use
+ // the request.WithRetryer helper function:
+ //
+ // cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+ //
+ Retryer RequestRetryer
+
+ // Disables semantic parameter validation, which validates input for
+ // missing required fields and/or other semantic request input errors.
+ DisableParamValidation *bool
+
+ // Disables the computation of request and response checksums, e.g.,
+ // CRC32 checksums in Amazon DynamoDB.
+ DisableComputeChecksums *bool
+
+ // Set this to `true` to force the request to use path-style addressing,
+ // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
+ // will use virtual hosted bucket addressing when possible
+ // (`http://BUCKET.s3.amazonaws.com/KEY`).
+ //
+ // @note This configuration option is specific to the Amazon S3 service.
+ // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+ // Amazon S3: Virtual Hosting of Buckets
+ S3ForcePathStyle *bool
+
+ // Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+ // header to PUT requests over 2MB of content. 100-Continue instructs the
+ // HTTP client not to send the body until the service responds with a
+ // `continue` status. This is useful to prevent sending the request body
+ // until after the request is authenticated, and validated.
+ //
+ // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+ //
+ // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+ // `ExpectContinueTimeout` for information on adjusting the continue wait
+ // timeout. https://golang.org/pkg/net/http/#Transport
+ //
+ // You should use this flag to disable 100-Continue if you experience issues
+ // with proxies or third party S3 compatible services.
+ S3Disable100Continue *bool
+
+ // Set this to `true` to enable S3 Accelerate feature. For all operations
+ // compatible with S3 Accelerate will use the accelerate endpoint for
+ // requests. Requests not compatible will fall back to normal S3 requests.
+ //
+ // The bucket must be enabled for accelerate in order to be used with an S3
+ // client that has accelerate enabled. If the bucket is not enabled for
+ // accelerate an error will be returned. The bucket name must also be DNS
+ // compatible to work with accelerate.
+ S3UseAccelerate *bool
+
+ // Set this to `true` to prevent the EC2Metadata client from overriding the
+ // default http.Client's Timeout. This is helpful if you do not want the
+ // EC2Metadata client to create a new http.Client. This option is only
+ // meaningful if you're not already using a custom HTTP client with the
+ // SDK. Enabled by default.
+ //
+ // Must be set and provided to session.NewSession() in order to prevent the
+ // EC2Metadata client from overriding the timeout for the default credentials
+ // chain.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(aws.NewConfig()
+ // .WithEC2MetadataDisableTimeoutOverride(true)))
+ //
+ // svc := s3.New(sess)
+ //
+ EC2MetadataDisableTimeoutOverride *bool
+
+ // Instructs the endpoint to be generated for a service client to
+ // be the dual stack endpoint. The dual stack endpoint will support
+ // both IPv4 and IPv6 addressing.
+ //
+ // Setting this for a service which does not support dual stack will fail
+ // to make requests. It is not recommended to set this value on the session,
+ // as it will apply to all service clients created with the session, even
+ // services which don't support dual stack endpoints.
+ //
+ // If the Endpoint config value is also provided the UseDualStack flag
+ // will be ignored.
+ //
+ // Example:
+ //
+ // sess := session.Must(session.NewSession())
+ //
+ // svc := s3.New(sess, &aws.Config{
+ // UseDualStack: aws.Bool(true),
+ // })
+ UseDualStack *bool
+
+ // SleepDelay is an override for the func the SDK will call when sleeping
+ // during the lifecycle of a request. Specifically this will be used for
+ // request delays. This value should only be used for testing. To adjust
+ // the delay of a request see the aws/client.DefaultRetryer and
+ // aws/request.Retryer.
+ //
+ // SleepDelay will prevent any Context from being used for canceling retry
+ // delay of an API operation. It is recommended to not use SleepDelay at all
+ // and specify a Retryer instead.
+ SleepDelay func(time.Duration)
+
+ // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
+ // Will default to false. This would only be used for empty directory names in s3 requests.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(&aws.Config{
+ // DisableRestProtocolURICleaning: aws.Bool(true),
+ // }))
+ //
+ // svc := s3.New(sess)
+ // out, err := svc.GetObject(&s3.GetObjectInput {
+ // Bucket: aws.String("bucketname"),
+ // Key: aws.String("//foo//bar//moo"),
+ // })
+ DisableRestProtocolURICleaning *bool
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+//
+// // Create Session with MaxRetry configuration to be shared by multiple
+// // service clients.
+// sess := session.Must(session.NewSession(aws.NewConfig().
+// WithMaxRetries(3),
+// ))
+//
+// // Create S3 service client with a specific Region.
+// svc := s3.New(sess, aws.NewConfig().
+// WithRegion("us-west-2"),
+// )
+func NewConfig() *Config {
+ return &Config{}
+}
+
+// WithCredentialsChainVerboseErrors sets a config verbose errors boolean,
+// returning a Config pointer for chaining.
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
+ c.CredentialsChainVerboseErrors = &verboseErrs
+ return c
+}
+
+// WithCredentials sets a config Credentials value returning a Config pointer
+// for chaining.
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
+ c.Credentials = creds
+ return c
+}
+
+// WithEndpoint sets a config Endpoint value returning a Config pointer for
+// chaining.
+func (c *Config) WithEndpoint(endpoint string) *Config {
+ c.Endpoint = &endpoint
+ return c
+}
+
+// WithEndpointResolver sets a config EndpointResolver value returning a
+// Config pointer for chaining.
+func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
+ c.EndpointResolver = resolver
+ return c
+}
+
+// WithRegion sets a config Region value returning a Config pointer for
+// chaining.
+func (c *Config) WithRegion(region string) *Config {
+ c.Region = &region
+ return c
+}
+
+// WithDisableSSL sets a config DisableSSL value returning a Config pointer
+// for chaining.
+func (c *Config) WithDisableSSL(disable bool) *Config {
+ c.DisableSSL = &disable
+ return c
+}
+
+// WithHTTPClient sets a config HTTPClient value returning a Config pointer
+// for chaining.
+func (c *Config) WithHTTPClient(client *http.Client) *Config {
+ c.HTTPClient = client
+ return c
+}
+
+// WithMaxRetries sets a config MaxRetries value returning a Config pointer
+// for chaining.
+func (c *Config) WithMaxRetries(max int) *Config {
+ c.MaxRetries = &max
+ return c
+}
+
+// WithDisableParamValidation sets a config DisableParamValidation value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableParamValidation(disable bool) *Config {
+ c.DisableParamValidation = &disable
+ return c
+}
+
+// WithDisableComputeChecksums sets a config DisableComputeChecksums value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
+ c.DisableComputeChecksums = &disable
+ return c
+}
+
+// WithLogLevel sets a config LogLevel value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogLevel(level LogLevelType) *Config {
+ c.LogLevel = &level
+ return c
+}
+
+// WithLogger sets a config Logger value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogger(logger Logger) *Config {
+ c.Logger = logger
+ return c
+}
+
+// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3ForcePathStyle(force bool) *Config {
+ c.S3ForcePathStyle = &force
+ return c
+}
+
+// WithS3Disable100Continue sets a config S3Disable100Continue value returning
+// a Config pointer for chaining.
+func (c *Config) WithS3Disable100Continue(disable bool) *Config {
+ c.S3Disable100Continue = &disable
+ return c
+}
+
+// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3UseAccelerate(enable bool) *Config {
+ c.S3UseAccelerate = &enable
+ return c
+}
+
+// WithUseDualStack sets a config UseDualStack value returning a Config
+// pointer for chaining.
+func (c *Config) WithUseDualStack(enable bool) *Config {
+ c.UseDualStack = &enable
+ return c
+}
+
+// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
+// returning a Config pointer for chaining.
+func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
+ c.EC2MetadataDisableTimeoutOverride = &enable
+ return c
+}
+
+// WithSleepDelay overrides the function used to sleep while waiting for the
+// next retry. Defaults to time.Sleep.
+func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
+ c.SleepDelay = fn
+ return c
+}
+
+// MergeIn merges the passed in configs into the existing config object.
+func (c *Config) MergeIn(cfgs ...*Config) {
+ for _, other := range cfgs {
+ mergeInConfig(c, other)
+ }
+}
+
+func mergeInConfig(dst *Config, other *Config) {
+ if other == nil {
+ return
+ }
+
+ if other.CredentialsChainVerboseErrors != nil {
+ dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
+ }
+
+ if other.Credentials != nil {
+ dst.Credentials = other.Credentials
+ }
+
+ if other.Endpoint != nil {
+ dst.Endpoint = other.Endpoint
+ }
+
+ if other.EndpointResolver != nil {
+ dst.EndpointResolver = other.EndpointResolver
+ }
+
+ if other.Region != nil {
+ dst.Region = other.Region
+ }
+
+ if other.DisableSSL != nil {
+ dst.DisableSSL = other.DisableSSL
+ }
+
+ if other.HTTPClient != nil {
+ dst.HTTPClient = other.HTTPClient
+ }
+
+ if other.LogLevel != nil {
+ dst.LogLevel = other.LogLevel
+ }
+
+ if other.Logger != nil {
+ dst.Logger = other.Logger
+ }
+
+ if other.MaxRetries != nil {
+ dst.MaxRetries = other.MaxRetries
+ }
+
+ if other.Retryer != nil {
+ dst.Retryer = other.Retryer
+ }
+
+ if other.DisableParamValidation != nil {
+ dst.DisableParamValidation = other.DisableParamValidation
+ }
+
+ if other.DisableComputeChecksums != nil {
+ dst.DisableComputeChecksums = other.DisableComputeChecksums
+ }
+
+ if other.S3ForcePathStyle != nil {
+ dst.S3ForcePathStyle = other.S3ForcePathStyle
+ }
+
+ if other.S3Disable100Continue != nil {
+ dst.S3Disable100Continue = other.S3Disable100Continue
+ }
+
+ if other.S3UseAccelerate != nil {
+ dst.S3UseAccelerate = other.S3UseAccelerate
+ }
+
+ if other.UseDualStack != nil {
+ dst.UseDualStack = other.UseDualStack
+ }
+
+ if other.EC2MetadataDisableTimeoutOverride != nil {
+ dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
+ }
+
+ if other.SleepDelay != nil {
+ dst.SleepDelay = other.SleepDelay
+ }
+
+ if other.DisableRestProtocolURICleaning != nil {
+ dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
+ }
+
+ if other.EnforceShouldRetryCheck != nil {
+ dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
+ }
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c *Config) Copy(cfgs ...*Config) *Config {
+ dst := &Config{}
+ dst.MergeIn(c)
+
+ for _, cfg := range cfgs {
+ dst.MergeIn(cfg)
+ }
+
+ return dst
+}
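Taken together, the builder methods and Copy/MergeIn above compose like this; a small sketch with arbitrary region values.

    // base is never mutated by Copy; MergeIn would mutate it in place.
    base := aws.NewConfig().
        WithRegion("us-west-2").
        WithMaxRetries(3)

    // Copy starts from base and layers the override on top.
    override := base.Copy(aws.NewConfig().WithRegion("us-east-1"))
    // override.Region is now "us-east-1"; MaxRetries is still 3.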
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context.go b/vendor/github.com/aws/aws-sdk-go/aws/context.go
new file mode 100644
index 00000000..79f42685
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context.go
@@ -0,0 +1,71 @@
+package aws
+
+import (
+ "time"
+)
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key returns the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ Value(key interface{}) interface{}
+}
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+ return backgroundCtx
+}
+
+// SleepWithContext will wait for the timer duration to expire, or for the
+// context to be canceled, whichever happens first. If the context is canceled,
+// the Context's error will be returned.
+//
+// Expects Context to always return a non-nil error if the Done channel is closed.
+func SleepWithContext(ctx Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ break
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ return nil
+}
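A short usage sketch for SleepWithContext, using only the aws package above plus the standard library time and log packages.

    // Sleeps for the full duration because BackgroundContext is never
    // canceled; with a cancelable Context the error branch would fire.
    ctx := aws.BackgroundContext()
    if err := aws.SleepWithContext(ctx, 500*time.Millisecond); err != nil {
        // ctx.Err() is returned when the context wins the select.
        log.Println("sleep interrupted:", err)
    }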
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go
new file mode 100644
index 00000000..e8cf93d2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go
@@ -0,0 +1,41 @@
+// +build !go1.7
+
+package aws
+
+import "time"
+
+// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This
+// is copied to provide a 1.6 and 1.5 safe version of context that is compatible
+// with Go 1.7's Context.
+//
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case backgroundCtx:
+ return "aws.BackgroundContext"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ backgroundCtx = new(emptyCtx)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go
new file mode 100644
index 00000000..064f75c9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go
@@ -0,0 +1,9 @@
+// +build go1.7
+
+package aws
+
+import "context"
+
+var (
+ backgroundCtx = context.Background()
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
new file mode 100644
index 00000000..3b73a7da
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -0,0 +1,369 @@
+package aws
+
+import "time"
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+ return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+ if v != nil {
+ return *v
+ }
+ return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+ dst := make([]*string, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+ dst := make([]string, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+ dst := make(map[string]*string)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+ dst := make(map[string]string)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Bool returns a pointer to the bool value passed in.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+ if v != nil {
+ return *v
+ }
+ return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+ dst := make([]*bool, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+ dst := make([]bool, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+ dst := make(map[string]*bool)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+ dst := make(map[string]bool)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int returns a pointer to the int value passed in.
+func Int(v int) *int {
+ return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
+func IntValue(v *int) int {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+ dst := make([]*int, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+ dst := make([]int, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+ dst := make(map[string]*int)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+ dst := make(map[string]int)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int64 returns a pointer to the int64 value passed in.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Int64Value returns the value of the int64 pointer passed in or
+// 0 if the pointer is nil.
+func Int64Value(v *int64) int64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers
+func Int64Slice(src []int64) []*int64 {
+ dst := make([]*int64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values
+func Int64ValueSlice(src []*int64) []int64 {
+ dst := make([]int64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers
+func Int64Map(src map[string]int64) map[string]*int64 {
+ dst := make(map[string]*int64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+ dst := make(map[string]int64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+ dst := make([]*float64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+ dst := make([]float64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+ dst := make(map[string]*float64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+ dst := make(map[string]float64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+ return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+ if v != nil {
+ return *v
+ }
+ return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64;
+// this includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service APIs such as CloudWatch Logs, which
+// require their Unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
+func TimeUnixMilli(t time.Time) int64 {
+ return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+ dst := make([]*time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+ dst := make([]time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+ dst := make(map[string]*time.Time)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+ dst := make(map[string]time.Time)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
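These pointer/value helpers are symmetric and nil-safe; a quick sketch of the round trip, using only functions defined in this file.

    name := aws.String("bucket")         // value -> *string
    _ = aws.StringValue(name)            // "bucket"
    _ = aws.StringValue(nil)             // "" (nil pointers yield zero values)

    ids := aws.Int64Slice([]int64{1, 2}) // []int64 -> []*int64
    _ = aws.Int64ValueSlice(ids)         // back to []int64{1, 2}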
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
new file mode 100644
index 00000000..25b461c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -0,0 +1,226 @@
+package corehandlers
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "runtime"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// Interface for matching types which also have a Len method.
+type lener interface {
+ Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine request body length and no "Content-Length" was specified it will panic.
+//
+// The Content-Length will only be added to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
+ var length int64
+
+ if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
+ length, _ = strconv.ParseInt(slength, 10, 64)
+ } else {
+ switch body := r.Body.(type) {
+ case nil:
+ length = 0
+ case lener:
+ length = int64(body.Len())
+ case io.Seeker:
+ r.BodyStart, _ = body.Seek(0, 1)
+ end, _ := body.Seek(0, 2)
+ body.Seek(r.BodyStart, 0) // make sure to seek back to original location
+ length = end - r.BodyStart
+ default:
+ panic("Cannot get length of body, must provide `ContentLength`")
+ }
+ }
+
+ if length > 0 {
+ r.HTTPRequest.ContentLength = length
+ r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+ } else {
+ r.HTTPRequest.ContentLength = 0
+ r.HTTPRequest.Header.Del("Content-Length")
+ }
+}}
+
+// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
+var SDKVersionUserAgentHandler = request.NamedHandler{
+ Name: "core.SDKVersionUserAgentHandler",
+ Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
+ runtime.Version(), runtime.GOOS, runtime.GOARCH),
+}
+
+var reStatusCode = regexp.MustCompile(`^(\d{3})`)
+
+// ValidateReqSigHandler is a request handler to ensure that the request's
+// signature doesn't expire before it is sent. This can happen when a request
+// is built and signed significantly before it is sent, or when significant
+// delays occur while retrying requests, causing the signature to expire.
+var ValidateReqSigHandler = request.NamedHandler{
+ Name: "core.ValidateReqSigHandler",
+ Fn: func(r *request.Request) {
+ // Requests using anonymous credentials are never signed, so skip them
+ if r.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+
+ signedTime := r.Time
+ if !r.LastSignedAt.IsZero() {
+ signedTime = r.LastSignedAt
+ }
+
+ // 10 minutes to allow for some clock skew/delays in transmission.
+ // Would be improved with aws/aws-sdk-go#423
+ if signedTime.Add(10 * time.Minute).After(time.Now()) {
+ return
+ }
+
+ fmt.Println("request expired, resigning")
+ r.Sign()
+ },
+}
+
+// SendHandler is a request handler to send service request using HTTP client.
+var SendHandler = request.NamedHandler{
+ Name: "core.SendHandler",
+ Fn: func(r *request.Request) {
+ sender := sendFollowRedirects
+ if r.DisableFollowRedirects {
+ sender = sendWithoutFollowRedirects
+ }
+
+ var err error
+ r.HTTPResponse, err = sender(r)
+ if err != nil {
+ handleSendError(r, err)
+ }
+ },
+}
+
+func sendFollowRedirects(r *request.Request) (*http.Response, error) {
+ return r.Config.HTTPClient.Do(r.HTTPRequest)
+}
+
+func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
+ transport := r.Config.HTTPClient.Transport
+ if transport == nil {
+ transport = http.DefaultTransport
+ }
+
+ return transport.RoundTrip(r.HTTPRequest)
+}
+
+func handleSendError(r *request.Request, err error) {
+ // Prevent leaking if an HTTPResponse was returned. Clean up
+ // the body.
+ if r.HTTPResponse != nil {
+ r.HTTPResponse.Body.Close()
+ }
+ // Capture the case where url.Error is returned while processing the
+ // response, e.g. a 301 without a Location header comes back as a string
+ // error and r.HTTPResponse is nil. Other URL redirect errors will
+ // come back in a similar manner.
+ if e, ok := err.(*url.Error); ok && e.Err != nil {
+ if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
+ code, _ := strconv.ParseInt(s[1], 10, 64)
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(code),
+ Status: http.StatusText(int(code)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ return
+ }
+ }
+ if r.HTTPResponse == nil {
+ // Add a dummy request response object to ensure the HTTPResponse
+ // value is consistent.
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(0),
+ Status: http.StatusText(int(0)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ }
+ // Catch all other request errors.
+ r.Error = awserr.New("RequestError", "send request failed", err)
+ r.Retryable = aws.Bool(true) // network errors are retryable
+
+ // Override the error with a context canceled error, if the context was canceled.
+ ctx := r.Context()
+ select {
+ case <-ctx.Done():
+ r.Error = awserr.New(request.CanceledErrorCode,
+ "request context canceled", ctx.Err())
+ r.Retryable = aws.Bool(false)
+ default:
+ }
+}
+
+// ValidateResponseHandler is a request handler to validate service response.
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
+ if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
+ // this may be replaced by an UnmarshalError handler
+ r.Error = awserr.New("UnknownError", "unknown error", nil)
+ }
+}}
+
+// AfterRetryHandler performs final checks to determine if the request should
+// be retried and how long to delay.
+var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
+ r.Retryable = aws.Bool(r.ShouldRetry(r))
+ }
+
+ if r.WillRetry() {
+ r.RetryDelay = r.RetryRules(r)
+
+ if sleepFn := r.Config.SleepDelay; sleepFn != nil {
+ // Support SleepDelay for backwards compatibility and testing
+ sleepFn(r.RetryDelay)
+ } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
+ r.Error = awserr.New(request.CanceledErrorCode,
+ "request context canceled", err)
+ r.Retryable = aws.Bool(false)
+ return
+ }
+
+ // when the expired token exception occurs the credentials
+ // need to be expired locally so that the next request to
+ // get credentials will trigger a credentials refresh.
+ if r.IsErrorExpired() {
+ r.Config.Credentials.Expire()
+ }
+
+ r.RetryCount++
+ r.Error = nil
+ }
+}}
+
+// ValidateEndpointHandler is a request handler to validate a request had the
+// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
+// region is not valid.
+var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
+ if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
+ r.Error = aws.ErrMissingRegion
+ } else if r.ClientInfo.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}}
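The named-handler lists used above are also the public extension point for callers; a sketch of attaching a custom Send handler, assuming svc is a *client.Client built as in client.go earlier in this diff and that fmt is imported.

    // Runs before core.SendHandler for every request the client sends.
    svc.Handlers.Send.PushFrontNamed(request.NamedHandler{
        Name: "example.LogOperation", // hypothetical handler name
        Fn: func(r *request.Request) {
            fmt.Println("sending", r.Operation.Name)
        },
    })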
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
new file mode 100644
index 00000000..7d50b155
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
@@ -0,0 +1,17 @@
+package corehandlers
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+// ValidateParametersHandler is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
+ if !r.ParamsFilled() {
+ return
+ }
+
+ if v, ok := r.Params.(request.Validator); ok {
+ if err := v.Validate(); err != nil {
+ r.Error = err
+ }
+ }
+}}
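ValidateParametersHandler only runs validation when r.Params implements request.Validator; a sketch of a hypothetical input type that satisfies it, assuming the vendored awserr package for the error value.

    // myInput is an illustrative request shape with one required field.
    type myInput struct {
        Name *string
    }

    // Validate satisfies request.Validator; a non-nil return is copied
    // into r.Error by the handler above, aborting the request.
    func (i *myInput) Validate() error {
        if i.Name == nil {
            return awserr.New("InvalidParameter", "Name is required", nil)
        }
        return nil
    }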
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 00000000..f298d659
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,102 @@
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+ // ErrNoValidProvidersFoundInChain Is returned when there are no valid
+ // providers in the ChainProvider.
+ //
+ // This has been deprecated. For verbose error messaging set
+ // aws.Config.CredentialsChainVerboseErrors to true
+ //
+ // @readonly
+ ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+ `no valid providers in chain. Deprecated.
+ For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+ nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain
+//
+// creds := credentials.NewChainCredentials(
+// []credentials.Provider{
+// &credentials.EnvProvider{},
+// &ec2rolecreds.EC2RoleProvider{
+// Client: ec2metadata.New(sess),
+// },
+// })
+//
+// // Usage of ChainCredentials with aws.Config
+// svc := ec2.New(session.Must(session.NewSession(&aws.Config{
+// Credentials: creds,
+// })))
+//
+type ChainProvider struct {
+ Providers []Provider
+ curr Provider
+ VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+ return NewCredentials(&ChainProvider{
+ Providers: append([]Provider{}, providers...),
+ })
+}
+
+// Retrieve returns the credentials value, or an error if no provider returned
+// one without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+ var errs []error
+ for _, p := range c.Providers {
+ creds, err := p.Retrieve()
+ if err == nil {
+ c.curr = p
+ return creds, nil
+ }
+ errs = append(errs, err)
+ }
+ c.curr = nil
+
+ var err error
+ err = ErrNoValidProvidersFoundInChain
+ if c.VerboseErrors {
+ err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+ }
+ return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+ if c.curr != nil {
+ return c.curr.IsExpired()
+ }
+
+ return true
+}
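Beyond the NewChainCredentials helper, the struct can be built directly to opt into verbose errors; a sketch, assuming the EnvProvider vendored elsewhere in this change.

    chain := &credentials.ChainProvider{
        // Surface each provider's individual error instead of the
        // generic NoCredentialProviders message.
        VerboseErrors: true,
        Providers: []credentials.Provider{
            &credentials.EnvProvider{},
        },
    }
    creds := credentials.NewCredentials(chain)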
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 00000000..42416fc2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,246 @@
+// Package credentials provides credential retrieval and management
+//
+// The Credentials type is the primary method of getting access to and managing
+// credential Values. Using dependency injection, retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true. At which
+// point Credentials will call Provider's Retrieve() to get new credential Value.
+//
+// The Provider is responsible for determining when a credentials Value has expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+// creds := credentials.NewEnvCredentials()
+//
+// // Retrieve the credentials value
+// credValue, err := creds.Get()
+// if err != nil {
+// // handle error
+// }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
+// creds.Expire()
+// credsValue, err := creds.Get()
+// // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+// type MyProvider struct{}
+// func (m *MyProvider) Retrieve() (Value, error) {...}
+// func (m *MyProvider) IsExpired() bool {...}
+//
+// creds := credentials.NewCredentials(&MyProvider{})
+// credValue, err := creds.Get()
+//
+package credentials
+
+import (
+ "sync"
+ "time"
+)
+
+// AnonymousCredentials is an empty Credentials object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+// svc := s3.New(session.Must(session.NewSession(&aws.Config{
+// Credentials: credentials.AnonymousCredentials,
+// })))
+// // Access public S3 buckets.
+//
+// @readonly
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+
+ // Provider used to get credentials
+ ProviderName string
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+ // Retrieve returns nil if it successfully retrieved the value.
+ // An error is returned if the value was not obtainable, or was empty.
+ Retrieve() (Value, error)
+
+ // IsExpired returns if the credentials are no longer valid, and need
+ // to be retrieved.
+ IsExpired() bool
+}
+
+// An ErrorProvider is a stub credentials provider that always returns an error.
+// It is used by the SDK when constructing a known provider is not possible
+// due to an error.
+type ErrorProvider struct {
+ // The error to be returned from Retrieve
+ Err error
+
+ // The provider name to set on the Value returned by Retrieve
+ ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
+func (p ErrorProvider) Retrieve() (Value, error) {
+ return Value{ProviderName: p.ProviderName}, p.Err
+}
+
+// IsExpired will always return not expired.
+func (p ErrorProvider) IsExpired() bool {
+ return false
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+// type EC2RoleProvider struct {
+// Expiry
+// ...
+// }
+type Expiry struct {
+ // The date/time when to expire on
+ expiration time.Time
+
+ // If set will be used by IsExpired to determine the current time.
+ // Defaults to time.Now if CurrentTime is not set. Available for testing
+ // to be able to mock out the current time.
+ CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+ e.expiration = expiration
+ if window > 0 {
+ e.expiration = e.expiration.Add(-window)
+ }
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+ if e.CurrentTime == nil {
+ e.CurrentTime = time.Now
+ }
+ return e.expiration.Before(e.CurrentTime())
+}
+
+// A Credentials provides synchronous safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronous state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+ creds Value
+ forceRefresh bool
+ m sync.Mutex
+
+ provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+ return &Credentials{
+ provider: provider,
+ forceRefresh: true,
+ }
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if c.isExpired() {
+ creds, err := c.provider.Retrieve()
+ if err != nil {
+ return Value{}, err
+ }
+ c.creds = creds
+ c.forceRefresh = false
+ }
+
+ return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be retrieved.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+ return c.forceRefresh || c.provider.IsExpired()
+}
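A compilable version of the custom-Provider sketch from the package docs above; the type and provider name are illustrative.

    // fixedProvider always returns the same Value and never expires,
    // so Credentials.Get() will only call Retrieve once.
    type fixedProvider struct {
        creds credentials.Value
    }

    func (p fixedProvider) Retrieve() (credentials.Value, error) {
        p.creds.ProviderName = "fixedProvider"
        return p.creds, nil
    }

    func (p fixedProvider) IsExpired() bool { return false }

    // Usage: creds := credentials.NewCredentials(fixedProvider{creds: v})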
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
new file mode 100644
index 00000000..c3974952
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -0,0 +1,178 @@
+package ec2rolecreds
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+)
+
+// ProviderName provides a name of EC2Role provider
+const ProviderName = "EC2RoleProvider"
+
+// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps
+// track of whether those credentials are expired.
+//
+// Example of how to configure the EC2RoleProvider with a custom HTTP client,
+// endpoint, or ExpiryWindow:
+//
+// p := &ec2rolecreds.EC2RoleProvider{
+// // Pass in a custom timeout to be used when requesting
+// // IAM EC2 Role credentials.
+// Client: ec2metadata.New(sess, aws.Config{
+// HTTPClient: &http.Client{Timeout: 10 * time.Second},
+// }),
+//
+// // Do not use early expiry of credentials. If a non-zero value is
+// // specified the credentials will be expired early.
+// ExpiryWindow: 0,
+// }
+type EC2RoleProvider struct {
+ credentials.Expiry
+
+ // Required EC2Metadata client to use when connecting to EC2 metadata service.
+ Client *ec2metadata.EC2Metadata
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client.
+// The ConfigProvider is satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+ p := &EC2RoleProvider{
+ Client: ec2metadata.New(c),
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to EC2
+// metadata service.
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+ p := &EC2RoleProvider{
+ Client: client,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or if the desired
+// credentials cannot be extracted.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+ credsList, err := requestCredList(m.Client)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ if len(credsList) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+ }
+ credsName := credsList[0]
+
+ roleCreds, err := requestCred(m.Client, credsName)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+ return credentials.Value{
+ AccessKeyID: roleCreds.AccessKeyID,
+ SecretAccessKey: roleCreds.SecretAccessKey,
+ SessionToken: roleCreds.Token,
+ ProviderName: ProviderName,
+ }, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+ // Success State
+ Expiration time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+
+ // Error state
+ Code string
+ Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials"
+
+// requestCredList requests a list of credentials from the EC2 service.
+// An error will be returned if there is an error making or receiving the request.
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
+ resp, err := client.GetMetadata(iamSecurityCredsPath)
+ if err != nil {
+ return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+ }
+
+ credsList := []string{}
+ s := bufio.NewScanner(strings.NewReader(resp))
+ for s.Scan() {
+ credsList = append(credsList, s.Text())
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
+ }
+
+ return credsList, nil
+}
+
+// requestCred requests the credentials for a specific role from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+ resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
+ if err != nil {
+ return ec2RoleCredRespBody{},
+ awserr.New("EC2RoleRequestError",
+ fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
+ err)
+ }
+
+ respCreds := ec2RoleCredRespBody{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{},
+ awserr.New("SerializationError",
+ fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
+ err)
+ }
+
+ if respCreds.Code != "Success" {
+ // If an error code was returned something failed requesting the role.
+ return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
+ }
+
+ return respCreds, nil
+}
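+
+// Example (illustrative only, not part of the vendored SDK): the two metadata
+// requests Retrieve makes, shown for a hypothetical role named "myRole":
+//
+// GET /meta-data/iam/security-credentials        -> "myRole\n"
+// GET /meta-data/iam/security-credentials/myRole -> JSON matching ec2RoleCredRespBody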
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
new file mode 100644
index 00000000..a4cec5c5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
@@ -0,0 +1,191 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+//
+// The credentials endpoint Provider can receive both static and refreshable
+// credentials that will expire. Credentials are static when an "Expiration"
+// value is not provided in the endpoint's response.
+//
+// Static credentials will never expire once they have been retrieved. The format
+// of the static credentials response:
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// }
+//
+// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
+// value in the response. The format of the refreshable credentials response:
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// "Token" : "AQoDY....=",
+// "Expiration" : "2016-02-25T06:03:31Z"
+// }
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+// {
+// "code": "ErrorCode",
+// "message": "Helpful error message."
+// }
+package endpointcreds
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+// Provider satisfies the credentials.Provider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+ staticCreds bool
+ credentials.Expiry
+
+ // Requires an AWS Client to make HTTP requests to the endpoint with.
+ // The endpoint the request will be made to is provided by the aws.Config's
+ // Endpoint value.
+ Client *client.Client
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+}
+
+// NewProviderClient returns a credentials Provider for retrieving AWS credentials
+// from an arbitrary endpoint.
+func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
+ p := &Provider{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "CredentialsEndpoint",
+ Endpoint: endpoint,
+ },
+ handlers,
+ ),
+ }
+
+ p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
+ p.Client.Handlers.Validate.Clear()
+ p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return p
+}
+
+// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
+// from an arbitrary endpoint concurrently. The client will request the
+// credentials from the endpoint when they are first needed, and again
+// whenever they expire.
+func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
+ return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
+}
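+
+// Example (illustrative only, not part of the vendored SDK): a minimal usage
+// sketch assuming a defaults.Get() style Config/Handlers pair and a
+// placeholder endpoint URL:
+//
+// def := defaults.Get()
+// creds := endpointcreds.NewCredentialsClient(*def.Config, def.Handlers,
+//     "http://127.0.0.1:8080/creds",
+//     func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute })
+// v, err := creds.Get()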
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *Provider) IsExpired() bool {
+ if p.staticCreds {
+ return false
+ }
+ return p.Expiry.IsExpired()
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+ resp, err := p.getCredentials()
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName},
+ awserr.New("CredentialsEndpointError", "failed to load credentials", err)
+ }
+
+ if resp.Expiration != nil {
+ p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+ } else {
+ p.staticCreds = true
+ }
+
+ return credentials.Value{
+ AccessKeyID: resp.AccessKeyID,
+ SecretAccessKey: resp.SecretAccessKey,
+ SessionToken: resp.Token,
+ ProviderName: ProviderName,
+ }, nil
+}
+
+type getCredentialsOutput struct {
+ Expiration *time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+}
+
+type errorOutput struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+}
+
+func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
+ op := &request.Operation{
+ Name: "GetCredentials",
+ HTTPMethod: "GET",
+ }
+
+ out := &getCredentialsOutput{}
+ req := p.Client.NewRequest(op, nil, out)
+ req.HTTPRequest.Header.Set("Accept", "application/json")
+
+ return out, req.Send()
+}
+
+func validateEndpointHandler(r *request.Request) {
+ if len(r.ClientInfo.Endpoint) == 0 {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}
+
+func unmarshalHandler(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ out := r.Data.(*getCredentialsOutput)
+ if err := json.NewDecoder(r.HTTPResponse.Body).Decode(out); err != nil {
+ r.Error = awserr.New("SerializationError",
+ "failed to decode endpoint credentials",
+ err,
+ )
+ }
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ var errOut errorOutput
+ if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
+ r.Error = awserr.New("SerializationError",
+ "failed to decode endpoint credentials",
+ err,
+ )
+ return // keep the decode error; do not overwrite it below
+ }
+
+ // The endpoint returned a structured error. Surface its code and message
+ // as the request error.
+ r.Error = awserr.New(errOut.Code, errOut.Message, nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100644
index 00000000..c14231a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,78 @@
+package credentials
+
+import (
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// EnvProviderName is the name of the Env provider
+const EnvProviderName = "EnvProvider"
+
+var (
+ // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
+ // found in the process's environment.
+ //
+ // @readonly
+ ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
+
+ // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
+ // can't be found in the process's environment.
+ //
+ // @readonly
+ ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
+)
+
+// An EnvProvider retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+//
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
+type EnvProvider struct {
+ retrieved bool
+}
+
+// NewEnvCredentials returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvCredentials() *Credentials {
+ return NewCredentials(&EnvProvider{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("AWS_ACCESS_KEY_ID")
+ if id == "" {
+ id = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if secret == "" {
+ secret = os.Getenv("AWS_SECRET_KEY")
+ }
+
+ if id == "" {
+ return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
+ }
+
+ if secret == "" {
+ return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
+ ProviderName: EnvProviderName,
+ }, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
+func (e *EnvProvider) IsExpired() bool {
+ return !e.retrieved
+}
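+
+// Example (illustrative only, not part of the vendored SDK): with
+// AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY exported in the environment:
+//
+// creds := credentials.NewEnvCredentials()
+// v, err := creds.Get() // v.ProviderName == EnvProviderName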
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
new file mode 100644
index 00000000..7fb7cbf0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -0,0 +1,151 @@
+package credentials
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/go-ini/ini"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// SharedCredsProviderName is the name of the SharedCreds provider
+const SharedCredsProviderName = "SharedCredentialsProvider"
+
+var (
+ // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
+ //
+ // @readonly
+ ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
+)
+
+// A SharedCredentialsProvider retrieves credentials from the current user's home
+// directory, and keeps track of whether those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type SharedCredentialsProvider struct {
+ // Path to the shared credentials file.
+ //
+ // If empty, will look for the "AWS_SHARED_CREDENTIALS_FILE" env variable. If
+ // that value is also empty, will default to the current user's home directory.
+ // Linux/OSX: "$HOME/.aws/credentials"
+ // Windows: "%USERPROFILE%\.aws\credentials"
+ Filename string
+
+ // AWS Profile to extract credentials from the shared credentials file. If empty
+ // will default to the "AWS_PROFILE" environment variable, or "default" if
+ // that environment variable is also not set.
+ Profile string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+ return NewCredentials(&SharedCredentialsProvider{
+ Filename: filename,
+ Profile: profile,
+ })
+}
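+
+// Example (illustrative only, not part of the vendored SDK): loading a
+// hypothetical "dev" profile from the default file location:
+//
+// ~/.aws/credentials:
+//   [dev]
+//   aws_access_key_id = AKID
+//   aws_secret_access_key = SECRET
+//
+// creds := credentials.NewSharedCredentials("", "dev")
+// v, err := creds.Get()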
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+ p.retrieved = false
+
+ filename, err := p.filename()
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, err
+ }
+
+ creds, err := loadProfile(filename, p.profile())
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, err
+ }
+
+ p.retrieved = true
+ return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+ return !p.retrieved
+}
+
+// loadProfile loads credentials from the file pointed to by the shared
+// credentials filename, for the given profile. The credentials retrieved from
+// the profile will be returned, or an error if it fails to read from the file
+// or the data is invalid.
+func loadProfile(filename, profile string) (Value, error) {
+ config, err := ini.Load(filename)
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+ }
+ iniProfile, err := config.GetSection(profile)
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
+ }
+
+ id, err := iniProfile.GetKey("aws_access_key_id")
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+ fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+ err)
+ }
+
+ secret, err := iniProfile.GetKey("aws_secret_access_key")
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+ fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+ err)
+ }
+
+ // Default to empty string if not found
+ token := iniProfile.Key("aws_session_token")
+
+ return Value{
+ AccessKeyID: id.String(),
+ SecretAccessKey: secret.String(),
+ SessionToken: token.String(),
+ ProviderName: SharedCredsProviderName,
+ }, nil
+}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+ if p.Filename == "" {
+ if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
+ return p.Filename, nil
+ }
+
+ homeDir := os.Getenv("HOME") // *nix
+ if homeDir == "" { // Windows
+ homeDir = os.Getenv("USERPROFILE")
+ }
+ if homeDir == "" {
+ return "", ErrSharedCredentialsHomeNotFound
+ }
+
+ p.Filename = filepath.Join(homeDir, ".aws", "credentials")
+ }
+
+ return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+ if p.Profile == "" {
+ p.Profile = os.Getenv("AWS_PROFILE")
+ }
+ if p.Profile == "" {
+ p.Profile = "default"
+ }
+
+ return p.Profile
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 00000000..4f5dab3f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,57 @@
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// StaticProviderName is the name of the Static provider
+const StaticProviderName = "StaticProvider"
+
+var (
+ // ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+ //
+ // @readonly
+ ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+ Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStaticCredentials(id, secret, token string) *Credentials {
+ return NewCredentials(&StaticProvider{Value: Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ }})
+}
+
+// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provider. Same as NewStaticCredentials,
+// but takes the creds Value instead of individual fields.
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+ return NewCredentials(&StaticProvider{Value: creds})
+}
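+
+// Example (illustrative only, not part of the vendored SDK): static
+// credentials with placeholder values:
+//
+// creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
+// v, err := creds.Get() // IsExpired() is always false for StaticProvider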
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+ if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+ return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+ }
+
+ if len(s.Value.ProviderName) == 0 {
+ s.Value.ProviderName = StaticProviderName
+ }
+ return s.Value, nil
+}
+
+// IsExpired returns true if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 00000000..4108e433
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,298 @@
+/*
+Package stscreds are credential Providers to retrieve STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronous usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions or service clients.
+
+Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDK's stscreds package.
+
+ // Initial credentials loaded from SDK's default credential chain. Such as
+ // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+// Role. These credentials will be used to make the STS Assume Role API call.
+ sess := session.Must(session.NewSession())
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN.
+ creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with static MFA Token
+
+To assume an IAM role with a MFA token you can either specify a MFA token code
+directly or provide a function to prompt the user each time the credentials
+need to refresh the role's credentials. Specifying the TokenCode should be used
+for short-lived operations that will not need to be refreshed, and when you do
+not need direct control over how the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN using the MFA token code provided.
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+ p.SerialNumber = aws.String("myTokenSerialNumber")
+ p.TokenCode = aws.String("00000000")
+ })
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for a new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+ p.SerialNumber = aws.String("myTokenSerialNumber")
+ p.TokenProvider = stscreds.StdinTokenProvider
+ })
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+*/
+package stscreds
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/service/sts"
+)
+
+// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make prompts from stdin atomic across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely
+//
+// Will wait forever until something is provided on stdin.
+func StdinTokenProvider() (string, error) {
+ var v string
+ fmt.Printf("Assume Role MFA token code: ")
+ _, err := fmt.Scanln(&v)
+
+ return v, err
+}
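+
+// Example (illustrative only, not part of the vendored SDK): any
+// func() (string, error) satisfies TokenProvider; a hypothetical fixed-token
+// variant, e.g. for tests:
+//
+// p.TokenProvider = func() (string, error) {
+//     return "123456", nil // placeholder MFA code
+// }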
+
+// ProviderName is the name of the AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+ AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+ credentials.Expiry
+
+ // STS client to make assume role request with.
+ Client AssumeRoler
+
+ // Role to be assumed.
+ RoleARN string
+
+ // Session name, if you wish to reuse the credentials elsewhere.
+ RoleSessionName string
+
+ // Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+ Duration time.Duration
+
+ // Optional ExternalID to pass along, defaults to nil if not set.
+ ExternalID *string
+
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string
+
+ // The identification number of the MFA device that is associated with the user
+ // who is making the AssumeRole call. Specify this value if the trust policy
+ // of the role being assumed includes a condition that requires MFA authentication.
+ // The value is either the serial number for a hardware device (such as GAHT12345678)
+ // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ SerialNumber *string
+
+ // The value provided by the MFA device, if the trust policy of the role being
+ // assumed requires MFA (that is, if the policy includes a condition that tests
+ // for MFA). If the role being assumed requires MFA and if the TokenCode value
+ // is missing or expired, the AssumeRole call returns an "access denied" error.
+ //
+ // If SerialNumber is set and neither TokenCode nor TokenProvider are also
+ // set an error will be returned.
+ TokenCode *string
+
+ // Async method of providing MFA token code for assuming an IAM role with MFA.
+ // The value returned by the function will be used as the TokenCode in the Retrieve
+ // call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+ //
+ // This token provider will be called whenever the assumed role's
+ // credentials need to be refreshed when SerialNumber is also set and
+ // TokenCode is not set.
+ //
+ // If both TokenCode and TokenProvider are set, TokenProvider will be used and
+ // TokenCode is ignored.
+ TokenProvider func() (string, error)
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes a Config provider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+ p := &AssumeRoleProvider{
+ Client: sts.New(c),
+ RoleARN: roleARN,
+ Duration: DefaultDuration,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes an AssumeRoler which can be satisfied by the STS client.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+ p := &AssumeRoleProvider{
+ Client: svc,
+ RoleARN: roleARN,
+ Duration: DefaultDuration,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
+
+ // Apply defaults where parameters are not set.
+ if p.RoleSessionName == "" {
+ // Try to work out a role name that will hopefully end up unique.
+ p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
+ }
+ if p.Duration == 0 {
+ // Expire as often as AWS permits.
+ p.Duration = DefaultDuration
+ }
+ input := &sts.AssumeRoleInput{
+ DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
+ RoleArn: aws.String(p.RoleARN),
+ RoleSessionName: aws.String(p.RoleSessionName),
+ ExternalId: p.ExternalID,
+ }
+ if p.Policy != nil {
+ input.Policy = p.Policy
+ }
+ if p.SerialNumber != nil {
+ if p.TokenCode != nil {
+ input.SerialNumber = p.SerialNumber
+ input.TokenCode = p.TokenCode
+ } else if p.TokenProvider != nil {
+ input.SerialNumber = p.SerialNumber
+ code, err := p.TokenProvider()
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+ input.TokenCode = aws.String(code)
+ } else {
+ return credentials.Value{ProviderName: ProviderName},
+ awserr.New("AssumeRoleTokenNotAvailable",
+ "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil)
+ }
+ }
+
+ roleOutput, err := p.Client.AssumeRole(input)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ // We will proactively generate new credentials before they expire.
+ p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
+
+ return credentials.Value{
+ AccessKeyID: *roleOutput.Credentials.AccessKeyId,
+ SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
+ SessionToken: *roleOutput.Credentials.SessionToken,
+ ProviderName: ProviderName,
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 00000000..07afe3b8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,163 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly, but session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+ cfg := Config()
+ handlers := Handlers()
+ cfg.Credentials = CredChain(cfg, handlers)
+
+ return Defaults{
+ Config: cfg,
+ Handlers: handlers,
+ }
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+ return aws.NewConfig().
+ WithCredentials(credentials.AnonymousCredentials).
+ WithRegion(os.Getenv("AWS_REGION")).
+ WithHTTPClient(http.DefaultClient).
+ WithMaxRetries(aws.UseServiceDefaultRetries).
+ WithLogger(aws.NewDefaultLogger()).
+ WithLogLevel(aws.LogOff).
+ WithEndpointResolver(endpoints.DefaultResolver())
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+ var handlers request.Handlers
+
+ handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+ handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+ handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+ handlers.Build.AfterEachFn = request.HandlerListStopOnError
+ handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+ handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
+ handlers.Send.PushBackNamed(corehandlers.SendHandler)
+ handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+ handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+ return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+ return credentials.NewCredentials(&credentials.ChainProvider{
+ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+ Providers: []credentials.Provider{
+ &credentials.EnvProvider{},
+ &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+ RemoteCredProvider(*cfg, handlers),
+ },
+ })
+}
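+
+// Example (illustrative only, not part of the vendored SDK): resetting a
+// config's credentials to the default chain, mirroring what Get() does:
+//
+// cfg := defaults.Config()
+// handlers := defaults.Handlers()
+// cfg.Credentials = defaults.CredChain(cfg, handlers)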
+
+const (
+ httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+ ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+)
+
+// RemoteCredProvider returns a credentials provider for the default remote
+// endpoints such as EC2 or ECS Roles.
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+ if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
+ return localHTTPCredProvider(cfg, handlers, u)
+ }
+
+ if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 {
+ u := fmt.Sprintf("http://169.254.170.2%s", uri)
+ return httpCredProvider(cfg, handlers, u)
+ }
+
+ return ec2RoleProvider(cfg, handlers)
+}
+
+func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+ var errMsg string
+
+ parsed, err := url.Parse(u)
+ if err != nil {
+ errMsg = fmt.Sprintf("invalid URL, %v", err)
+ } else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") {
+ errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host)
+ }
+
+ if len(errMsg) > 0 {
+ if cfg.Logger != nil {
+ cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
+ }
+ return credentials.ErrorProvider{
+ Err: awserr.New("CredentialsEndpointError", errMsg, err),
+ ProviderName: endpointcreds.ProviderName,
+ }
+ }
+
+ return httpCredProvider(cfg, handlers, u)
+}
+
+func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+ return endpointcreds.NewProviderClient(cfg, handlers, u,
+ func(p *endpointcreds.Provider) {
+ p.ExpiryWindow = 5 * time.Minute
+ },
+ )
+}
+
+func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+ resolver := cfg.EndpointResolver
+ if resolver == nil {
+ resolver = endpoints.DefaultResolver()
+ }
+
+ e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
+ return &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
+ ExpiryWindow: 5 * time.Minute,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 00000000..984407a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,162 @@
+package ec2metadata
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or
+// error if the request failed.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+ op := &request.Operation{
+ Name: "GetMetadata",
+ HTTPMethod: "GET",
+ HTTPPath: path.Join("/", "meta-data", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+
+ return output.Content, req.Send()
+}
+
+// GetUserData returns the userdata that was configured for the EC2 instance. If
+// there is no user-data set up for the EC2 instance, a "NotFoundError" error
+// code will be returned.
+func (c *EC2Metadata) GetUserData() (string, error) {
+ op := &request.Operation{
+ Name: "GetUserData",
+ HTTPMethod: "GET",
+ HTTPPath: path.Join("/", "user-data"),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+ req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
+ if r.HTTPResponse.StatusCode == http.StatusNotFound {
+ r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
+ }
+ })
+
+ return output.Content, req.Send()
+}
+
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or error if the request failed.
+func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
+ op := &request.Operation{
+ Name: "GetDynamicData",
+ HTTPMethod: "GET",
+ HTTPPath: path.Join("/", "dynamic", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+
+ return output.Content, req.Send()
+}
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. Error is returned if the request fails or is unable to parse
+// the response.
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
+ resp, err := c.GetDynamicData("instance-identity/document")
+ if err != nil {
+ return EC2InstanceIdentityDocument{},
+ awserr.New("EC2MetadataRequestError",
+ "failed to get EC2 instance identity document", err)
+ }
+
+ doc := EC2InstanceIdentityDocument{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
+ return EC2InstanceIdentityDocument{},
+ awserr.New("SerializationError",
+ "failed to decode EC2 instance identity document", err)
+ }
+
+ return doc, nil
+}
+
+// IAMInfo retrieves IAM info from the metadata API
+func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
+ resp, err := c.GetMetadata("iam/info")
+ if err != nil {
+ return EC2IAMInfo{},
+ awserr.New("EC2MetadataRequestError",
+ "failed to get EC2 IAM info", err)
+ }
+
+ info := EC2IAMInfo{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
+ return EC2IAMInfo{},
+ awserr.New("SerializationError",
+ "failed to decode EC2 IAM info", err)
+ }
+
+ if info.Code != "Success" {
+ errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
+ return EC2IAMInfo{},
+ awserr.New("EC2MetadataError", errMsg, nil)
+ }
+
+ return info, nil
+}
+
+// Region returns the region the instance is running in.
+func (c *EC2Metadata) Region() (string, error) {
+ resp, err := c.GetMetadata("placement/availability-zone")
+ if err != nil {
+ return "", err
+ }
+
+ // returns region without the suffix. Eg: us-west-2a becomes us-west-2
+ return resp[:len(resp)-1], nil
+}
+
+// Available returns true if the application has access to the EC2 Metadata service.
+// Can be used to determine if application is running within an EC2 Instance and
+// the metadata service is available.
+func (c *EC2Metadata) Available() bool {
+ if _, err := c.GetMetadata("instance-id"); err != nil {
+ return false
+ }
+
+ return true
+}
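+
+// Example (illustrative only, not part of the vendored SDK): a common gating
+// pattern, assuming svc was created with ec2metadata.New(sess):
+//
+// if svc.Available() {
+//     region, _ := svc.Region() // e.g. "us-west-2"
+// }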
+
+// An EC2IAMInfo provides the shape for unmarshaling
+// an IAM info from the metadata API
+type EC2IAMInfo struct {
+ Code string
+ LastUpdated time.Time
+ InstanceProfileArn string
+ InstanceProfileID string
+}
+
+// An EC2InstanceIdentityDocument provides the shape for unmarshaling
+// an instance identity document
+type EC2InstanceIdentityDocument struct {
+ DevpayProductCodes []string `json:"devpayProductCodes"`
+ AvailabilityZone string `json:"availabilityZone"`
+ PrivateIP string `json:"privateIp"`
+ Version string `json:"version"`
+ Region string `json:"region"`
+ InstanceID string `json:"instanceId"`
+ BillingProducts []string `json:"billingProducts"`
+ InstanceType string `json:"instanceType"`
+ AccountID string `json:"accountId"`
+ PendingTime time.Time `json:"pendingTime"`
+ ImageID string `json:"imageId"`
+ KernelID string `json:"kernelId"`
+ RamdiskID string `json:"ramdiskId"`
+ Architecture string `json:"architecture"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100644
index 00000000..5b4379db
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -0,0 +1,124 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+package ec2metadata
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+
+// An EC2Metadata is an EC2 Metadata service client.
+type EC2Metadata struct {
+ *client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+//
+// Example:
+// // Create a EC2Metadata client from just a session.
+// svc := ec2metadata.New(mySession)
+//
+// // Create a EC2Metadata client with additional configuration
+// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If an unmodified HTTP client from the stdlib default, or no client at all,
+// is provided, the client's timeout will be shortened so requests fail fast
+// when the metadata service is not available.
+// To disable this override set Config.EC2MetadataDisableTimeoutOverride to
+// true. The override is enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+ if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+ // If the http client is unmodified and this feature is not disabled
+ // set custom timeouts for EC2Metadata requests.
+ cfg.HTTPClient = &http.Client{
+ // use a shorter timeout than default because the metadata
+ // service is local if it is running, and to fail faster
+ // if not running on an ec2 instance.
+ Timeout: 5 * time.Second,
+ }
+ }
+
+ svc := &EC2Metadata{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ Endpoint: endpoint,
+ APIVersion: "latest",
+ },
+ handlers,
+ ),
+ }
+
+ svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+ svc.Handlers.Validate.Clear()
+ svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+ // Add additional options to the service config
+ for _, option := range opts {
+ option(svc.Client)
+ }
+
+ return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+ return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+ Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
+ return
+ }
+
+ if data, ok := r.Data.(*metadataOutput); ok {
+ data.Content = b.String()
+ }
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
+ return
+ }
+
+ // Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error
+ r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+}
+
+func validateEndpointHandler(r *request.Request) {
+ if r.ClientInfo.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
new file mode 100644
index 00000000..74f72de0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -0,0 +1,133 @@
+package endpoints
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+type modelDefinition map[string]json.RawMessage
+
+// DecodeModelOptions are the options for how the endpoints model definition
+// is decoded.
+type DecodeModelOptions struct {
+ SkipCustomizations bool
+}
+
+// Set combines all of the option functions together.
+func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// DecodeModel unmarshals a Regions and Endpoint model definition file into
+// an endpoint Resolver. If the file format is not supported, or an error occurs
+// when unmarshaling the model an error will be returned.
+//
+// Casting the return value of this func to an EnumPartitions will
+// allow you to get a list of the partitions in the order the endpoints
+// will be resolved in.
+//
+// resolver, err := endpoints.DecodeModel(reader)
+//
+// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
+ var opts DecodeModelOptions
+ opts.Set(optFns...)
+
+ // Get the version of the partition file to determine what
+ // unmarshaling model to use.
+ modelDef := modelDefinition{}
+ if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ var version string
+ if b, ok := modelDef["version"]; ok {
+ version = string(b)
+ } else {
+ return nil, newDecodeModelError("endpoints version not found in model", nil)
+ }
+
+ if version == "3" {
+ return decodeV3Endpoints(modelDef, opts)
+ }
+
+ return nil, newDecodeModelError(
+ fmt.Sprintf("endpoints version %s, not supported", version), nil)
+}
+
+func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
+ b, ok := modelDef["partitions"]
+ if !ok {
+ return nil, newDecodeModelError("endpoints model missing partitions", nil)
+ }
+
+ ps := partitions{}
+ if err := json.Unmarshal(b, &ps); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ if opts.SkipCustomizations {
+ return ps, nil
+ }
+
+ // Customization
+ for i := 0; i < len(ps); i++ {
+ p := &ps[i]
+ custAddEC2Metadata(p)
+ custAddS3DualStack(p)
+ custRmIotDataService(p)
+ }
+
+ return ps, nil
+}
+
+func custAddS3DualStack(p *partition) {
+ if p.ID != "aws" {
+ return
+ }
+
+ s, ok := p.Services["s3"]
+ if !ok {
+ return
+ }
+
+ s.Defaults.HasDualStack = boxedTrue
+ s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}"
+
+ p.Services["s3"] = s
+}
+
+func custAddEC2Metadata(p *partition) {
+ p.Services["ec2metadata"] = service{
+ IsRegionalized: boxedFalse,
+ PartitionEndpoint: "aws-global",
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ }
+}
+
+func custRmIotDataService(p *partition) {
+ delete(p.Services, "data.iot")
+}
+
+type decodeModelError struct {
+ awsError
+}
+
+func newDecodeModelError(msg string, err error) decodeModelError {
+ return decodeModelError{
+ awsError: awserr.New("DecodeEndpointsModelError", msg, err),
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
new file mode 100644
index 00000000..90558c44
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -0,0 +1,2149 @@
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+ "regexp"
+)
+
+// Partition identifiers
+const (
+ AwsPartitionID = "aws" // AWS Standard partition.
+ AwsCnPartitionID = "aws-cn" // AWS China partition.
+ AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
+)
+
+// AWS Standard partition's regions.
+const (
+ ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo).
+ ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
+ ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai).
+ ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
+ ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
+ CaCentral1RegionID = "ca-central-1" // Canada (Central).
+ EuCentral1RegionID = "eu-central-1" // EU (Frankfurt).
+ EuWest1RegionID = "eu-west-1" // EU (Ireland).
+ EuWest2RegionID = "eu-west-2" // EU (London).
+ SaEast1RegionID = "sa-east-1" // South America (Sao Paulo).
+ UsEast1RegionID = "us-east-1" // US East (N. Virginia).
+ UsEast2RegionID = "us-east-2" // US East (Ohio).
+ UsWest1RegionID = "us-west-1" // US West (N. California).
+ UsWest2RegionID = "us-west-2" // US West (Oregon).
+)
+
+// AWS China partition's regions.
+const (
+ CnNorth1RegionID = "cn-north-1" // China (Beijing).
+)
+
+// AWS GovCloud (US) partition's regions.
+const (
+ UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US).
+)
+
+// Service identifiers
+const (
+ AcmServiceID = "acm" // Acm.
+ ApigatewayServiceID = "apigateway" // Apigateway.
+ ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
+ Appstream2ServiceID = "appstream2" // Appstream2.
+ AutoscalingServiceID = "autoscaling" // Autoscaling.
+ BatchServiceID = "batch" // Batch.
+ BudgetsServiceID = "budgets" // Budgets.
+ ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
+ CloudformationServiceID = "cloudformation" // Cloudformation.
+ CloudfrontServiceID = "cloudfront" // Cloudfront.
+ CloudhsmServiceID = "cloudhsm" // Cloudhsm.
+ CloudsearchServiceID = "cloudsearch" // Cloudsearch.
+ CloudtrailServiceID = "cloudtrail" // Cloudtrail.
+ CodebuildServiceID = "codebuild" // Codebuild.
+ CodecommitServiceID = "codecommit" // Codecommit.
+ CodedeployServiceID = "codedeploy" // Codedeploy.
+ CodepipelineServiceID = "codepipeline" // Codepipeline.
+ CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
+ CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
+ CognitoSyncServiceID = "cognito-sync" // CognitoSync.
+ ConfigServiceID = "config" // Config.
+ CurServiceID = "cur" // Cur.
+ DatapipelineServiceID = "datapipeline" // Datapipeline.
+ DevicefarmServiceID = "devicefarm" // Devicefarm.
+ DirectconnectServiceID = "directconnect" // Directconnect.
+ DiscoveryServiceID = "discovery" // Discovery.
+ DmsServiceID = "dms" // Dms.
+ DsServiceID = "ds" // Ds.
+ DynamodbServiceID = "dynamodb" // Dynamodb.
+ Ec2ServiceID = "ec2" // Ec2.
+ Ec2metadataServiceID = "ec2metadata" // Ec2metadata.
+ EcrServiceID = "ecr" // Ecr.
+ EcsServiceID = "ecs" // Ecs.
+ ElasticacheServiceID = "elasticache" // Elasticache.
+ ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk.
+ ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem.
+ ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing.
+ ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce.
+ ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder.
+ EmailServiceID = "email" // Email.
+ EsServiceID = "es" // Es.
+ EventsServiceID = "events" // Events.
+ FirehoseServiceID = "firehose" // Firehose.
+ GameliftServiceID = "gamelift" // Gamelift.
+ GlacierServiceID = "glacier" // Glacier.
+ HealthServiceID = "health" // Health.
+ IamServiceID = "iam" // Iam.
+ ImportexportServiceID = "importexport" // Importexport.
+ InspectorServiceID = "inspector" // Inspector.
+ IotServiceID = "iot" // Iot.
+ KinesisServiceID = "kinesis" // Kinesis.
+ KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
+ KmsServiceID = "kms" // Kms.
+ LambdaServiceID = "lambda" // Lambda.
+ LightsailServiceID = "lightsail" // Lightsail.
+ LogsServiceID = "logs" // Logs.
+ MachinelearningServiceID = "machinelearning" // Machinelearning.
+ MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
+ MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
+ MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
+ ModelsLexServiceID = "models.lex" // ModelsLex.
+ MonitoringServiceID = "monitoring" // Monitoring.
+ MturkRequesterServiceID = "mturk-requester" // MturkRequester.
+ OpsworksServiceID = "opsworks" // Opsworks.
+ OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
+ OrganizationsServiceID = "organizations" // Organizations.
+ PinpointServiceID = "pinpoint" // Pinpoint.
+ PollyServiceID = "polly" // Polly.
+ RdsServiceID = "rds" // Rds.
+ RedshiftServiceID = "redshift" // Redshift.
+ RekognitionServiceID = "rekognition" // Rekognition.
+ Route53ServiceID = "route53" // Route53.
+ Route53domainsServiceID = "route53domains" // Route53domains.
+ RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
+ S3ServiceID = "s3" // S3.
+ SdbServiceID = "sdb" // Sdb.
+ ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
+ ShieldServiceID = "shield" // Shield.
+ SmsServiceID = "sms" // Sms.
+ SnowballServiceID = "snowball" // Snowball.
+ SnsServiceID = "sns" // Sns.
+ SqsServiceID = "sqs" // Sqs.
+ SsmServiceID = "ssm" // Ssm.
+ StatesServiceID = "states" // States.
+ StoragegatewayServiceID = "storagegateway" // Storagegateway.
+ StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
+ StsServiceID = "sts" // Sts.
+ SupportServiceID = "support" // Support.
+ SwfServiceID = "swf" // Swf.
+ TaggingServiceID = "tagging" // Tagging.
+ WafServiceID = "waf" // Waf.
+ WafRegionalServiceID = "waf-regional" // WafRegional.
+ WorkdocsServiceID = "workdocs" // Workdocs.
+ WorkspacesServiceID = "workspaces" // Workspaces.
+ XrayServiceID = "xray" // Xray.
+)
+
+// DefaultResolver returns an Endpoint resolver that will be able
+// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+// Use DefaultPartitions() to get the list of the default partitions.
+func DefaultResolver() Resolver {
+ return defaultPartitions
+}
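+
+// For example, a minimal sketch of resolving an endpoint through the default
+// resolver (uses only the exported API of this package):
+//
+//    ep, err := endpoints.DefaultResolver().EndpointFor(
+//        endpoints.S3ServiceID, "us-west-2")
+//    if err == nil {
+//        fmt.Println(ep.URL, ep.SigningRegion)
+//    }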
+
+// DefaultPartitions returns a list of the partitions the SDK is bundled
+// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+// partitions := endpoints.DefaultPartitions()
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DefaultPartitions() []Partition {
+ return defaultPartitions.Partitions()
+}
+
+var defaultPartitions = partitions{
+ awsPartition,
+ awscnPartition,
+ awsusgovPartition,
+}
+
+// AwsPartition returns the Resolver for AWS Standard.
+func AwsPartition() Partition {
+ return awsPartition.Partition()
+}
+
+var awsPartition = partition{
+ ID: "aws",
+ Name: "AWS Standard",
+ DNSSuffix: "amazonaws.com",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "ap-northeast-1": region{
+ Description: "Asia Pacific (Tokyo)",
+ },
+ "ap-northeast-2": region{
+ Description: "Asia Pacific (Seoul)",
+ },
+ "ap-south-1": region{
+ Description: "Asia Pacific (Mumbai)",
+ },
+ "ap-southeast-1": region{
+ Description: "Asia Pacific (Singapore)",
+ },
+ "ap-southeast-2": region{
+ Description: "Asia Pacific (Sydney)",
+ },
+ "ca-central-1": region{
+ Description: "Canada (Central)",
+ },
+ "eu-central-1": region{
+ Description: "EU (Frankfurt)",
+ },
+ "eu-west-1": region{
+ Description: "EU (Ireland)",
+ },
+ "eu-west-2": region{
+ Description: "EU (London)",
+ },
+ "sa-east-1": region{
+ Description: "South America (Sao Paulo)",
+ },
+ "us-east-1": region{
+ Description: "US East (N. Virginia)",
+ },
+ "us-east-2": region{
+ Description: "US East (Ohio)",
+ },
+ "us-west-1": region{
+ Description: "US West (N. California)",
+ },
+ "us-west-2": region{
+ Description: "US West (Oregon)",
+ },
+ },
+ Services: services{
+ "acm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "appstream2": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Service: "appstream",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "batch": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "budgets": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "budgets.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "clouddirectory": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudfront": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "cloudfront.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "cloudhsm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudsearch": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codebuild": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codecommit": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codepipeline": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-identity": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-idp": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-sync": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cur": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "datapipeline": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "devicefarm": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "discovery": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "local": endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecr": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticfilesystem": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticmapreduce": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.{service}.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elastictranscoder": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "email": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "firehose": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "gamelift": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "glacier": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "health": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "iam.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "importexport": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "importexport.amazonaws.com",
+ SignatureVersions: []string{"v2", "v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ Service: "IngestionService",
+ },
+ },
+ },
+ },
+ "inspector": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesisanalytics": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lightsail": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "machinelearning": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "marketplacecommerceanalytics": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "metering.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mobileanalytics": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "models.lex": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "monitoring": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mturk-requester": service{
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "sandbox": endpoint{
+ Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com",
+ },
+ "us-east-1": endpoint{},
+ },
+ },
+ "opsworks": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "opsworks-cm": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "organizations": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "organizations.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "pinpoint": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "mobiletargeting",
+ },
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "polly": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "{service}.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "rekognition": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "route53": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "route53.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "route53domains": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "runtime.lex": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "s3": service{
+ PartitionEndpoint: "us-east-1",
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+
+ HasDualStack: boxedTrue,
+ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "s3-ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{
+ Hostname: "s3-ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "s3-ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{
+ Hostname: "s3-eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "eu-west-2": endpoint{},
+ "s3-external-1": endpoint{
+ Hostname: "s3-external-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "s3-sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-east-1": endpoint{
+ Hostname: "s3.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{
+ Hostname: "s3-us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-west-2": endpoint{
+ Hostname: "s3-us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ },
+ },
+ "sdb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v2"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ Hostname: "sdb.amazonaws.com",
+ },
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "servicecatalog": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "shield": service{
+ IsRegionalized: boxedFalse,
+ Defaults: endpoint{
+ SSLCommonName: "Shield.us-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "queue.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "http", "https", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "local": endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sts": service{
+ PartitionEndpoint: "aws-global",
+ Defaults: endpoint{
+ Hostname: "sts.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{
+ Hostname: "sts.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "aws-global": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "support": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "waf": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "waf.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "waf-regional": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workdocs": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workspaces": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "xray": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ },
+}
+
+// AwsCnPartition returns the Resolver for AWS China.
+func AwsCnPartition() Partition {
+ return awscnPartition.Partition()
+}
+
+var awscnPartition = partition{
+ ID: "aws-cn",
+ Name: "AWS China",
+ DNSSuffix: "amazonaws.com.cn",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "cn-north-1": region{
+ Description: "China (Beijing)",
+ },
+ },
+ Services: services{
+ "autoscaling": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "elasticmapreduce": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "glacier": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "iam.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "monitoring": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "http", "https", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "sts": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ },
+}
+
+// AwsUsGovPartition returns the Resolver for AWS GovCloud (US).
+func AwsUsGovPartition() Partition {
+ return awsusgovPartition.Partition()
+}
+
+var awsusgovPartition = partition{
+ ID: "aws-us-gov",
+ Name: "AWS GovCloud (US)",
+ DNSSuffix: "amazonaws.com",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "us-gov-west-1": region{
+ Description: "AWS GovCloud (US)",
+ },
+ },
+ Services: services{
+ "autoscaling": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudhsm": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "dynamodb": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "ec2": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "elasticmapreduce": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "glacier": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-us-gov-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-us-gov-global": endpoint{
+ Hostname: "iam.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "monitoring": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpoint{
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "s3-fips-us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "s3-us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "sns": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "sqs": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "sts": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
new file mode 100644
index 00000000..a0e9bc45
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
@@ -0,0 +1,66 @@
+// Package endpoints provides the types and functionality for defining regions
+// and endpoints, as well as querying those definitions.
+//
+// The SDK's Regions and Endpoints metadata is code generated into the endpoints
+// package, and is accessible via the DefaultResolver function. This function
+// returns an endpoint Resolver that will search the metadata and build an
+// associated endpoint if one is found. The default resolver will search all
+// partitions known by the SDK, e.g. AWS Standard (aws), AWS China (aws-cn),
+// and AWS GovCloud (US) (aws-us-gov).
+//
+// Enumerating Regions and Endpoint Metadata
+//
+// Casting the Resolver returned by DefaultResolver to an EnumPartitions interface
+// will allow you to get access to the list of underlying Partitions with the
+// Partitions method. This is helpful if you want to limit the SDK's endpoint
+// resolving to a single partition, or enumerate regions, services, and endpoints
+// in the partition.
+//
+// resolver := endpoints.DefaultResolver()
+// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//
+// for _, p := range partitions {
+// fmt.Println("Regions for", p.Name)
+// for id := range p.Regions() {
+// fmt.Println("*", id)
+// }
+//
+// fmt.Println("Services for", p.Name)
+// for id := range p.Services() {
+// fmt.Println("*", id)
+// }
+// }
+//
+// Using Custom Endpoints
+//
+// The endpoints package also gives you the ability to use your own logic for
+// how endpoints are resolved. This is a great way to define a custom endpoint
+// for select services, without passing that logic down through your code.
+//
+// If a type implements the Resolver interface it can be used to resolve
+// endpoints. To use this with the SDK's Session and Config, set the value
+// of the type to the EndpointResolver field of aws.Config when initializing
+// the session, or service client.
+//
+// In addition, the ResolverFunc is a wrapper for a func matching the signature
+// of Resolver.EndpointFor, converting it to a type that satisfies the
+// Resolver interface.
+//
+// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+// if service == endpoints.S3ServiceID {
+// return endpoints.ResolvedEndpoint{
+// URL: "s3.custom.endpoint.com",
+// SigningRegion: "custom-signing-region",
+// }, nil
+// }
+//
+// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+// }
+//
+// sess := session.Must(session.NewSession(&aws.Config{
+// Region: aws.String("us-west-2"),
+// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
+// }))
+package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
new file mode 100644
index 00000000..9c3eedb4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -0,0 +1,439 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Options provide the configuration needed to direct how the
+// endpoints will be resolved.
+type Options struct {
+ // DisableSSL forces the endpoint to be resolved as HTTP
+ // instead of HTTPS if the service supports it.
+ DisableSSL bool
+
+ // Sets the resolver to resolve the endpoint as a dualstack endpoint
+ // for the service. If dualstack support for a service is not known and
+ // StrictMatching is not enabled a dualstack endpoint for the service will
+ // be returned. This endpoint may not be valid. If StrictMatching is
+ // enabled only services that are known to support dualstack will return
+ // dualstack endpoints.
+ UseDualStack bool
+
+ // Enables strict matching of services and regions resolved endpoints.
+ // If the partition doesn't enumerate the exact service and region an
+ // error will be returned. This option will prevent returning endpoints
+ // that look valid, but may not resolve to any real endpoint.
+ StrictMatching bool
+
+ // Enables resolving a service endpoint based on the region provided if the
+ // service does not exist. The service endpoint ID will be used as the service
+ // domain name prefix. By default the endpoint resolver requires the service
+ // to be known when resolving endpoints.
+ //
+ // If resolving an endpoint on the partition list, the provided region will
+ // be used to determine which partition's domain name pattern to combine
+ // with the service endpoint ID. If both the service and region are unknown
+ // when resolving an endpoint on the partition list, an UnknownEndpointError
+ // will be returned.
+ //
+ // If resolving an endpoint on a partition-specific resolver, that
+ // partition's domain name pattern will be used with the service endpoint
+ // ID. If both region and service do not exist when resolving an endpoint
+ // on a specific partition, the partition's domain pattern will be used to
+ // combine the endpoint and region together.
+ //
+ // This option is ignored if StrictMatching is enabled.
+ ResolveUnknownService bool
+}
+
+// Set applies each of the option functions to the Options value.
+func (o *Options) Set(optFns ...func(*Options)) {
+ for _, fn := range optFns {
+ fn(o)
+ }
+}
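+
+// A usage sketch: Set applies each option function in order, so later
+// options win on any conflicting fields.
+//
+//    var o Options
+//    o.Set(DisableSSLOption, StrictMatchingOption)
+//    // o.DisableSSL == true, o.StrictMatching == true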
+
+// DisableSSLOption sets the DisableSSL options. Can be used as a functional
+// option when resolving endpoints.
+func DisableSSLOption(o *Options) {
+ o.DisableSSL = true
+}
+
+// UseDualStackOption sets the UseDualStack option. Can be used as a functional
+// option when resolving endpoints.
+func UseDualStackOption(o *Options) {
+ o.UseDualStack = true
+}
+
+// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
+// option when resolving endpoints.
+func StrictMatchingOption(o *Options) {
+ o.StrictMatching = true
+}
+
+// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
+// as a functional option when resolving endpoints.
+func ResolveUnknownServiceOption(o *Options) {
+ o.ResolveUnknownService = true
+}
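+
+// These functional options are passed through EndpointFor's variadic opts
+// parameter. For example, a sketch resolving a dualstack S3 endpoint with
+// strict matching:
+//
+//    ep, err := endpoints.DefaultResolver().EndpointFor(
+//        endpoints.S3ServiceID, "us-west-2",
+//        endpoints.StrictMatchingOption, endpoints.UseDualStackOption)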
+
+// A Resolver provides the interface for functionality to resolve endpoints.
+// The built-in Partition and DefaultResolver return values satisfy this interface.
+type Resolver interface {
+ EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+}
+
+// ResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+
+// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
+func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return fn(service, region, opts...)
+}
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
+// scheme. If disableSSL is true, HTTP will be used instead of the default HTTPS.
+//
+// If disableSSL is set, it will only set the URL's scheme if the URL does not
+// contain a scheme.
+func AddScheme(endpoint string, disableSSL bool) string {
+ if !schemeRE.MatchString(endpoint) {
+ scheme := "https"
+ if disableSSL {
+ scheme = "http"
+ }
+ endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+ }
+
+ return endpoint
+}
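+
+// For example (the behavior follows directly from the implementation above):
+//
+//    AddScheme("example.com", false)       // "https://example.com"
+//    AddScheme("example.com", true)        // "http://example.com"
+//    AddScheme("http://example.com", true) // unchanged, scheme already present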
+
+// EnumPartitions provides a way to retrieve the underlying partitions that
+// make up the SDK's default Resolver, or any resolver decoded from a model
+// file.
+//
+// Use this interface with DefaultResolver and DecodeModels to get the list of
+// Partitions.
+type EnumPartitions interface {
+ Partitions() []Partition
+}
+
+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist, false will be returned
+// as the second return value.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+ for _, p := range ps {
+ if p.ID() != partitionID {
+ continue
+ }
+ if _, ok := p.p.Services[serviceID]; !ok {
+ break
+ }
+
+ s := Service{
+ id: serviceID,
+ p: p.p,
+ }
+ return s.Regions(), true
+ }
+
+ return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+ for _, p := range ps {
+ if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+ return p, true
+ }
+ }
+
+ return Partition{}, false
+}
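+
+// For example, a sketch finding the partition that contains a region:
+//
+//    p, ok := endpoints.PartitionForRegion(
+//        endpoints.DefaultPartitions(), "cn-north-1")
+//    if ok {
+//        fmt.Println(p.ID()) // "aws-cn"
+//    }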
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+ id string
+ p *partition
+}
+
+// ID returns the identifier of the partition.
+func (p Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata the UnknownServiceError
+// error will be returned. This validation occurs regardless of whether
+// StrictMatching is enabled. To enable resolving unknown services set the
+// "ResolveUnknownService" option to true. When StrictMatching is disabled
+// this option allows the partition resolver to resolve an endpoint based on
+// the service endpoint ID provided.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new regions and services expansions.
+//
+// Errors that can be returned:
+// * UnknownServiceError
+// * UnknownEndpointError
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return p.p.EndpointFor(service, region, opts...)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
+func (p Partition) Regions() map[string]Region {
+ rs := map[string]Region{}
+ for id := range p.p.Regions {
+ rs[id] = Region{
+ id: id,
+ p: p.p,
+ }
+ }
+
+ return rs
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p Partition) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id := range p.p.Services {
+ ss[id] = Service{
+ id: id,
+ p: p.p,
+ }
+ }
+
+ return ss
+}
+
+// A Region provides information about a region, and the ability to resolve
+// an endpoint from the context of a region, given a service.
+type Region struct {
+ id, desc string
+ p *partition
+}
+
+// ID returns the region's identifier.
+func (r Region) ID() string { return r.id }
+
+// ResolveEndpoint resolves an endpoint from the context of the region given
+// a service. See Partition.EndpointFor for usage and errors that can be returned.
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return r.p.EndpointFor(service, r.id, opts...)
+}
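+
+// A usage sketch, resolving a service endpoint from a Region value:
+//
+//    if r, ok := endpoints.AwsPartition().Regions()["us-west-2"]; ok {
+//        ep, err := r.ResolveEndpoint(endpoints.Ec2ServiceID)
+//        if err == nil {
+//            fmt.Println(ep.URL)
+//        }
+//    }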
+
+// Services returns a map of all services that are known to be in this region.
+func (r Region) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id, s := range r.p.Services {
+ if _, ok := s.Endpoints[r.id]; ok {
+ ss[id] = Service{
+ id: id,
+ p: r.p,
+ }
+ }
+ }
+
+ return ss
+}
+
+// A Service provides information about a service, and the ability to resolve
+// an endpoint from the context of a service, given a region.
+type Service struct {
+ id string
+ p *partition
+}
+
+// ID returns the identifier for the service.
+func (s Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Regions returns a map of Regions that the service is present in.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+ rs := map[string]Region{}
+ for id := range s.p.Services[s.id].Endpoints {
+ if _, ok := s.p.Regions[id]; ok {
+ rs[id] = Region{
+ id: id,
+ p: s.p,
+ }
+ }
+ }
+
+ return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+ es := map[string]Endpoint{}
+ for id := range s.p.Services[s.id].Endpoints {
+ es[id] = Endpoint{
+ id: id,
+ serviceID: s.id,
+ p: s.p,
+ }
+ }
+
+ return es
+}
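+
+// Example (a hedged sketch of enumerating a service's known endpoints; the
+// "s3" service ID is an assumed value):
+//
+//    s3 := endpoints.AwsPartition().Services()["s3"]
+//    for id := range s3.Endpoints() {
+//        fmt.Println(id) // each endpoint ID the service is known in
+//    }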
+
+// An Endpoint provides information about an endpoint, and the ability to
+// resolve that endpoint for the service and the region the endpoint
+// represents.
+type Endpoint struct {
+ id string
+ serviceID string
+ p *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a partition
+// service, and region.
+type ResolvedEndpoint struct {
+ // The endpoint URL
+ URL string
+
+ // The region that should be used for signing requests.
+ SigningRegion string
+
+ // The service name that should be used for signing requests.
+ SigningName string
+
+ // The signing method that should be used for signing requests.
+ SigningMethod string
+}
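+
+// For illustration, resolving a service in a region typically yields values
+// of the following shape (assumed placeholder values, not authoritative):
+//
+//    ResolvedEndpoint{
+//        URL:           "https://service.region.amazonaws.com",
+//        SigningRegion: "region",
+//        SigningName:   "service",
+//        SigningMethod: "v4",
+//    }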
+
+// So that the Error interface type can be included as an anonymous field
+// in the error structs below and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when in StrictMatching mode, and the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+ awsError
+ Partition string
+ Service string
+ Known []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+ return UnknownServiceError{
+ awsError: awserr.New("UnknownServiceError",
+ "could not resolve endpoint for unknown service", nil),
+ Partition: p,
+ Service: s,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownServiceError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q",
+ e.Partition, e.Service)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+ return e.Error()
+}
+
+// An UnknownEndpointError is returned when in StrictMatching mode and the
+// service is valid, but the region does not resolve to an endpoint. Includes
+// a list of all known endpoints for the service.
+type UnknownEndpointError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+ Known []string
+}
+
+// NewUnknownEndpointError builds and returns UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+ return UnknownEndpointError{
+ awsError: awserr.New("UnknownEndpointError",
+ "could not resolve endpoint", nil),
+ Partition: p,
+ Service: s,
+ Region: r,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownEndpointError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+ e.Partition, e.Service, e.Region)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownEndpointError) String() string {
+ return e.Error()
+}
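+
+// A hedged sketch of distinguishing resolver errors by type assertion
+// ("unknown-service" is an assumed placeholder ID):
+//
+//    resolver := endpoints.DefaultResolver()
+//    _, err := resolver.EndpointFor("unknown-service", "us-west-2")
+//    if use, ok := err.(endpoints.UnknownServiceError); ok {
+//        fmt.Println("known services:", use.Known)
+//    }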
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
new file mode 100644
index 00000000..13d968a2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -0,0 +1,303 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+type partitions []partition
+
+func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ var opt Options
+ opt.Set(opts...)
+
+ for i := 0; i < len(ps); i++ {
+ if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) {
+ continue
+ }
+
+ return ps[i].EndpointFor(service, region, opts...)
+ }
+
+ // If no partition matched and loose matching is allowed, fall back to
+ // the first partition's format when resolving the endpoint.
+ if !opt.StrictMatching && len(ps) > 0 {
+ return ps[0].EndpointFor(service, region, opts...)
+ }
+
+ return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
+}
+
+// Partitions satisfies the EnumPartitions interface and returns a list
+// of Partitions representing each partition represented in the SDK's
+// endpoints model.
+func (ps partitions) Partitions() []Partition {
+ parts := make([]Partition, 0, len(ps))
+ for i := 0; i < len(ps); i++ {
+ parts = append(parts, ps[i].Partition())
+ }
+
+ return parts
+}
+
+type partition struct {
+ ID string `json:"partition"`
+ Name string `json:"partitionName"`
+ DNSSuffix string `json:"dnsSuffix"`
+ RegionRegex regionRegex `json:"regionRegex"`
+ Defaults endpoint `json:"defaults"`
+ Regions regions `json:"regions"`
+ Services services `json:"services"`
+}
+
+func (p partition) Partition() Partition {
+ return Partition{
+ id: p.ID,
+ p: &p,
+ }
+}
+
+func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool {
+ s, hasService := p.Services[service]
+ _, hasEndpoint := s.Endpoints[region]
+
+ if hasEndpoint && hasService {
+ return true
+ }
+
+ if strictMatch {
+ return false
+ }
+
+ return p.RegionRegex.MatchString(region)
+}
+
+func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
+ var opt Options
+ opt.Set(opts...)
+
+ s, hasService := p.Services[service]
+ if !(hasService || opt.ResolveUnknownService) {
+ // Only return error if the resolver will not fallback to creating
+ // endpoint based on service endpoint ID passed in.
+ return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
+ }
+
+ e, hasEndpoint := s.endpointForRegion(region)
+ if !hasEndpoint && opt.StrictMatching {
+ return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints))
+ }
+
+ defs := []endpoint{p.Defaults, s.Defaults}
+ return e.resolve(service, region, p.DNSSuffix, defs, opt), nil
+}
+
+func serviceList(ss services) []string {
+ list := make([]string, 0, len(ss))
+ for k := range ss {
+ list = append(list, k)
+ }
+ return list
+}
+func endpointList(es endpoints) []string {
+ list := make([]string, 0, len(es))
+ for k := range es {
+ list = append(list, k)
+ }
+ return list
+}
+
+type regionRegex struct {
+ *regexp.Regexp
+}
+
+func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) {
+ // Strip leading and trailing quotes
+ regex, err := strconv.Unquote(string(b))
+ if err != nil {
+ return fmt.Errorf("unable to strip quotes from regex, %v", err)
+ }
+
+ rr.Regexp, err = regexp.Compile(regex)
+ if err != nil {
+ return fmt.Errorf("unable to unmarshal region regex, %v", err)
+ }
+ return nil
+}
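+
+// For example, the partition model carries the regex as a JSON string such as
+// "^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$"; strconv.Unquote strips the surrounding
+// quotes and unescapes the backslashes before the pattern is compiled.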
+
+type regions map[string]region
+
+type region struct {
+ Description string `json:"description"`
+}
+
+type services map[string]service
+
+type service struct {
+ PartitionEndpoint string `json:"partitionEndpoint"`
+ IsRegionalized boxedBool `json:"isRegionalized,omitempty"`
+ Defaults endpoint `json:"defaults"`
+ Endpoints endpoints `json:"endpoints"`
+}
+
+func (s *service) endpointForRegion(region string) (endpoint, bool) {
+ if s.IsRegionalized == boxedFalse {
+ return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint
+ }
+
+ if e, ok := s.Endpoints[region]; ok {
+ return e, true
+ }
+
+ // Unable to find any matching endpoint, return a blank endpoint that
+ // will be used for generic endpoint creation.
+ return endpoint{}, false
+}
+
+type endpoints map[string]endpoint
+
+type endpoint struct {
+ Hostname string `json:"hostname"`
+ Protocols []string `json:"protocols"`
+ CredentialScope credentialScope `json:"credentialScope"`
+
+ // Custom fields not modeled
+ HasDualStack boxedBool `json:"-"`
+ DualStackHostname string `json:"-"`
+
+ // Signature Version not used
+ SignatureVersions []string `json:"signatureVersions"`
+
+ // SSLCommonName not used.
+ SSLCommonName string `json:"sslCommonName"`
+}
+
+const (
+ defaultProtocol = "https"
+ defaultSigner = "v4"
+)
+
+var (
+ protocolPriority = []string{"https", "http"}
+ signerPriority = []string{"v4", "v2"}
+)
+
+func getByPriority(s []string, p []string, def string) string {
+ if len(s) == 0 {
+ return def
+ }
+
+ for i := 0; i < len(p); i++ {
+ for j := 0; j < len(s); j++ {
+ if s[j] == p[i] {
+ return s[j]
+ }
+ }
+ }
+
+ return s[0]
+}
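+
+// For example, with s = []string{"http", "https"} and p = protocolPriority
+// ([]string{"https", "http"}), getByPriority returns "https" because it is
+// the highest-priority value present in s. With an empty s the default
+// value def is returned instead.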
+
+func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
+ var merged endpoint
+ for _, def := range defs {
+ merged.mergeIn(def)
+ }
+ merged.mergeIn(e)
+ e = merged
+
+ hostname := e.Hostname
+
+ // Offset the hostname for dualstack if enabled
+ if opts.UseDualStack && e.HasDualStack == boxedTrue {
+ hostname = e.DualStackHostname
+ }
+
+ u := strings.Replace(hostname, "{service}", service, 1)
+ u = strings.Replace(u, "{region}", region, 1)
+ u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)
+
+ scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
+ u = fmt.Sprintf("%s://%s", scheme, u)
+
+ signingRegion := e.CredentialScope.Region
+ if len(signingRegion) == 0 {
+ signingRegion = region
+ }
+ signingName := e.CredentialScope.Service
+ if len(signingName) == 0 {
+ signingName = service
+ }
+
+ return ResolvedEndpoint{
+ URL: u,
+ SigningRegion: signingRegion,
+ SigningName: signingName,
+ SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
+ }
+}
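+
+// For example, given the common hostname template
+// "{service}.{region}.{dnsSuffix}" and the assumed inputs service="s3",
+// region="us-west-2", dnsSuffix="amazonaws.com", the substitutions produce
+// "s3.us-west-2.amazonaws.com", which is then prefixed with the resolved
+// scheme to form "https://s3.us-west-2.amazonaws.com".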
+
+func getEndpointScheme(protocols []string, disableSSL bool) string {
+ if disableSSL {
+ return "http"
+ }
+
+ return getByPriority(protocols, protocolPriority, defaultProtocol)
+}
+
+func (e *endpoint) mergeIn(other endpoint) {
+ if len(other.Hostname) > 0 {
+ e.Hostname = other.Hostname
+ }
+ if len(other.Protocols) > 0 {
+ e.Protocols = other.Protocols
+ }
+ if len(other.SignatureVersions) > 0 {
+ e.SignatureVersions = other.SignatureVersions
+ }
+ if len(other.CredentialScope.Region) > 0 {
+ e.CredentialScope.Region = other.CredentialScope.Region
+ }
+ if len(other.CredentialScope.Service) > 0 {
+ e.CredentialScope.Service = other.CredentialScope.Service
+ }
+ if len(other.SSLCommonName) > 0 {
+ e.SSLCommonName = other.SSLCommonName
+ }
+ if other.HasDualStack != boxedBoolUnset {
+ e.HasDualStack = other.HasDualStack
+ }
+ if len(other.DualStackHostname) > 0 {
+ e.DualStackHostname = other.DualStackHostname
+ }
+}
+
+type credentialScope struct {
+ Region string `json:"region"`
+ Service string `json:"service"`
+}
+
+type boxedBool int
+
+func (b *boxedBool) UnmarshalJSON(buf []byte) error {
+ v, err := strconv.ParseBool(string(buf))
+ if err != nil {
+ return err
+ }
+
+ if v {
+ *b = boxedTrue
+ } else {
+ *b = boxedFalse
+ }
+
+ return nil
+}
+
+const (
+ boxedBoolUnset boxedBool = iota
+ boxedFalse
+ boxedTrue
+)
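+
+// The three-valued boxedBool distinguishes an unset field from an explicit
+// false in the JSON model: unmarshaling the bytes `true` yields boxedTrue,
+// `false` yields boxedFalse, and a field absent from the JSON keeps the zero
+// value boxedBoolUnset.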
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
new file mode 100644
index 00000000..05e92df2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
@@ -0,0 +1,337 @@
+// +build codegen
+
+package endpoints
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "text/template"
+ "unicode"
+)
+
+// CodeGenOptions are the options for generating Go code for the endpoints
+// from the endpoints model definition.
+type CodeGenOptions struct {
+ // Options for how the model will be decoded.
+ DecodeModelOptions DecodeModelOptions
+}
+
+// Set combines all of the option functions together
+func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// CodeGenModel, given an endpoints model file, decodes it and attempts to
+// generate Go code from the model definition. An error is returned if the
+// model cannot be decoded, or the code cannot be generated.
+func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error {
+ var opts CodeGenOptions
+ opts.Set(optFns...)
+
+ resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) {
+ *d = opts.DecodeModelOptions
+ })
+ if err != nil {
+ return err
+ }
+
+ tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
+ if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil {
+ return fmt.Errorf("failed to execute template, %v", err)
+ }
+
+ return nil
+}
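+
+// A hedged usage sketch (the file names are assumed values; building this
+// file requires the codegen build tag):
+//
+//    in, _ := os.Open("endpoints.json")
+//    out, _ := os.Create("defaults.go")
+//    if err := CodeGenModel(in, out); err != nil {
+//        log.Fatal(err)
+//    }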
+
+func toSymbol(v string) string {
+ out := []rune{}
+ for _, c := range strings.Title(v) {
+ if !(unicode.IsNumber(c) || unicode.IsLetter(c)) {
+ continue
+ }
+
+ out = append(out, c)
+ }
+
+ return string(out)
+}
+
+func quoteString(v string) string {
+ return fmt.Sprintf("%q", v)
+}
+
+func regionConstName(p, r string) string {
+ return toSymbol(p) + toSymbol(r)
+}
+
+func partitionGetter(id string) string {
+ return fmt.Sprintf("%sPartition", toSymbol(id))
+}
+
+func partitionVarName(id string) string {
+ return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id)))
+}
+
+func listPartitionNames(ps partitions) string {
+ names := []string{}
+ switch len(ps) {
+ case 1:
+ return ps[0].Name
+ case 2:
+ return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name)
+ default:
+ for i, p := range ps {
+ if i == len(ps)-1 {
+ names = append(names, "and "+p.Name)
+ } else {
+ names = append(names, p.Name)
+ }
+ }
+ return strings.Join(names, ", ")
+ }
+}
+
+func boxedBoolIfSet(msg string, v boxedBool) string {
+ switch v {
+ case boxedTrue:
+ return fmt.Sprintf(msg, "boxedTrue")
+ case boxedFalse:
+ return fmt.Sprintf(msg, "boxedFalse")
+ default:
+ return ""
+ }
+}
+
+func stringIfSet(msg, v string) string {
+ if len(v) == 0 {
+ return ""
+ }
+
+ return fmt.Sprintf(msg, v)
+}
+
+func stringSliceIfSet(msg string, vs []string) string {
+ if len(vs) == 0 {
+ return ""
+ }
+
+ names := []string{}
+ for _, v := range vs {
+ names = append(names, `"`+v+`"`)
+ }
+
+ return fmt.Sprintf(msg, strings.Join(names, ","))
+}
+
+func endpointIsSet(v endpoint) bool {
+ return !reflect.DeepEqual(v, endpoint{})
+}
+
+func serviceSet(ps partitions) map[string]struct{} {
+ set := map[string]struct{}{}
+ for _, p := range ps {
+ for id := range p.Services {
+ set[id] = struct{}{}
+ }
+ }
+
+ return set
+}
+
+var funcMap = template.FuncMap{
+ "ToSymbol": toSymbol,
+ "QuoteString": quoteString,
+ "RegionConst": regionConstName,
+ "PartitionGetter": partitionGetter,
+ "PartitionVarName": partitionVarName,
+ "ListPartitionNames": listPartitionNames,
+ "BoxedBoolIfSet": boxedBoolIfSet,
+ "StringIfSet": stringIfSet,
+ "StringSliceIfSet": stringSliceIfSet,
+ "EndpointIsSet": endpointIsSet,
+ "ServicesSet": serviceSet,
+}
+
+const v3Tmpl = `
+{{ define "defaults" -}}
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+ "regexp"
+)
+
+ {{ template "partition consts" . }}
+
+ {{ range $_, $partition := . }}
+ {{ template "partition region consts" $partition }}
+ {{ end }}
+
+ {{ template "service consts" . }}
+
+ {{ template "endpoint resolvers" . }}
+{{- end }}
+
+{{ define "partition consts" }}
+ // Partition identifiers
+ const (
+ {{ range $_, $p := . -}}
+ {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "partition region consts" }}
+ // {{ .Name }} partition's regions.
+ const (
+ {{ range $id, $region := .Regions -}}
+ {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "service consts" }}
+ // Service identifiers
+ const (
+ {{ $serviceSet := ServicesSet . -}}
+ {{ range $id, $_ := $serviceSet -}}
+ {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "endpoint resolvers" }}
+ // DefaultResolver returns an Endpoint resolver that will be able
+ // to resolve endpoints for: {{ ListPartitionNames . }}.
+ //
+ // Use DefaultPartitions() to get the list of the default partitions.
+ func DefaultResolver() Resolver {
+ return defaultPartitions
+ }
+
+ // DefaultPartitions returns a list of the partitions the SDK is bundled
+ // with. The available partitions are: {{ ListPartitionNames . }}.
+ //
+ // partitions := endpoints.DefaultPartitions()
+ // for _, p := range partitions {
+ // // ... inspect partitions
+ // }
+ func DefaultPartitions() []Partition {
+ return defaultPartitions.Partitions()
+ }
+
+ var defaultPartitions = partitions{
+ {{ range $_, $partition := . -}}
+ {{ PartitionVarName $partition.ID }},
+ {{ end }}
+ }
+
+ {{ range $_, $partition := . -}}
+ {{ $name := PartitionGetter $partition.ID -}}
+ // {{ $name }} returns the Resolver for {{ $partition.Name }}.
+ func {{ $name }}() Partition {
+ return {{ PartitionVarName $partition.ID }}.Partition()
+ }
+ var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }}
+ {{ end }}
+{{ end }}
+
+{{ define "default partitions" }}
+ func DefaultPartitions() []Partition {
+ return []partition{
+ {{ range $_, $partition := . -}}
+ // {{ ToSymbol $partition.ID}}Partition(),
+ {{ end }}
+ }
+ }
+{{ end }}
+
+{{ define "gocode Partition" -}}
+partition{
+ {{ StringIfSet "ID: %q,\n" .ID -}}
+ {{ StringIfSet "Name: %q,\n" .Name -}}
+ {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
+ RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }},
+ {{ if EndpointIsSet .Defaults -}}
+ Defaults: {{ template "gocode Endpoint" .Defaults }},
+ {{- end }}
+ Regions: {{ template "gocode Regions" .Regions }},
+ Services: {{ template "gocode Services" .Services }},
+}
+{{- end }}
+
+{{ define "gocode RegionRegex" -}}
+regionRegex{
+ Regexp: func() *regexp.Regexp{
+ reg, _ := regexp.Compile({{ QuoteString .Regexp.String }})
+ return reg
+ }(),
+}
+{{- end }}
+
+{{ define "gocode Regions" -}}
+regions{
+ {{ range $id, $region := . -}}
+ "{{ $id }}": {{ template "gocode Region" $region }},
+ {{ end -}}
+}
+{{- end }}
+
+{{ define "gocode Region" -}}
+region{
+ {{ StringIfSet "Description: %q,\n" .Description -}}
+}
+{{- end }}
+
+{{ define "gocode Services" -}}
+services{
+ {{ range $id, $service := . -}}
+ "{{ $id }}": {{ template "gocode Service" $service }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Service" -}}
+service{
+ {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}}
+ {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}}
+ {{ if EndpointIsSet .Defaults -}}
+ Defaults: {{ template "gocode Endpoint" .Defaults -}},
+ {{- end }}
+ {{ if .Endpoints -}}
+ Endpoints: {{ template "gocode Endpoints" .Endpoints }},
+ {{- end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoints" -}}
+endpoints{
+ {{ range $id, $endpoint := . -}}
+ "{{ $id }}": {{ template "gocode Endpoint" $endpoint }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoint" -}}
+endpoint{
+ {{ StringIfSet "Hostname: %q,\n" .Hostname -}}
+ {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}}
+ {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}}
+ {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}}
+ {{ if or .CredentialScope.Region .CredentialScope.Service -}}
+ CredentialScope: credentialScope{
+ {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}}
+ {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}}
+ },
+ {{- end }}
+ {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}}
+ {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}}
+
+}
+{{- end }}
+`
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
new file mode 100644
index 00000000..57663616
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
@@ -0,0 +1,17 @@
+package aws
+
+import "github.com/aws/aws-sdk-go/aws/awserr"
+
+var (
+ // ErrMissingRegion is an error that is returned if region configuration is
+ // not found.
+ //
+ // @readonly
+ ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
+
+ // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
+ // resolved for a service.
+ //
+ // @readonly
+ ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
new file mode 100644
index 00000000..91a6f277
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
@@ -0,0 +1,12 @@
+package aws
+
+// JSONValue is a representation of a grab bag type that will be marshaled
+// into a json string. This type can be used just like any other map.
+//
+// Example:
+//
+// values := aws.JSONValue{
+// "Foo": "Bar",
+// }
+// values["Baz"] = "Qux"
+type JSONValue map[string]interface{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644
index 00000000..db87188e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,112 @@
+package aws
+
+import (
+ "log"
+ "os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns a pointer to a LogLevel. Should be used as a workaround
+// for not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+ return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+ if l != nil {
+ return *l
+ }
+ return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+ c := l.Value()
+ return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+ c := l.Value()
+ return c >= v
+}
+
+const (
+ // LogOff states that no logging should be performed by the SDK. This is the
+ // default state of the SDK, and should be used to disable all logging.
+ LogOff LogLevelType = iota * 0x1000
+
+ // LogDebug states that debug output should be logged by the SDK. This should
+ // be used to inspect requests made and responses received.
+ LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+ // LogDebugWithSigning states that the SDK should log request signing and
+ // presigning events. This should be used to log the signing details of
+ // requests for debugging. Will also enable LogDebug.
+ LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+ // LogDebugWithHTTPBody states the SDK should log HTTP request and response
+ // HTTP bodies in addition to the headers and path. This should be used to
+ // see the body content of requests and responses made while using the SDK.
+ // Will also enable LogDebug.
+ LogDebugWithHTTPBody
+
+ // LogDebugWithRequestRetries states the SDK should log when service requests
+ // will be retried. This should be used when you want to log when service
+ // requests are being retried. Will also enable LogDebug.
+ LogDebugWithRequestRetries
+
+ // LogDebugWithRequestErrors states the SDK should log when service requests fail
+ // to build, send, validate, or unmarshal.
+ LogDebugWithRequestErrors
+)
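+
+// Because the sub levels share the LogDebug bit, enabling any sub level also
+// enables LogDebug. For example (a sketch):
+//
+//    l := aws.LogDebugWithHTTPBody
+//    l.Matches(aws.LogDebug)            // true
+//    l.Matches(aws.LogDebugWithSigning) // false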
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+ Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type to convert a function taking a variadic
+// list of arguments and wrap it so the Logger interface can be used.
+//
+// Example:
+// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+// fmt.Fprintln(os.Stdout, args...)
+// })})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided
+func (f LoggerFunc) Log(args ...interface{}) {
+ f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout,
+// and use the same formatting flags as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+ return &defaultLogger{
+ logger: log.New(os.Stdout, "", log.LstdFlags),
+ }
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+ logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+ l.logger.Println(args...)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
new file mode 100644
index 00000000..10fc8cb2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
@@ -0,0 +1,19 @@
+// +build !appengine
+
+package request
+
+import (
+ "net"
+ "os"
+ "syscall"
+)
+
+func isErrConnectionReset(err error) bool {
+ if opErr, ok := err.(*net.OpError); ok {
+ if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
+ return sysErr.Err == syscall.ECONNRESET
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_appengine.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_appengine.go
new file mode 100644
index 00000000..996196e7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_appengine.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package request
+
+import (
+ "strings"
+)
+
+func isErrConnectionReset(err error) bool {
+ return strings.Contains(err.Error(), "connection reset")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 00000000..6c14336f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,225 @@
+package request
+
+import (
+ "fmt"
+ "strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+ Validate HandlerList
+ Build HandlerList
+ Sign HandlerList
+ Send HandlerList
+ ValidateResponse HandlerList
+ Unmarshal HandlerList
+ UnmarshalMeta HandlerList
+ UnmarshalError HandlerList
+ Retry HandlerList
+ AfterRetry HandlerList
+ Complete HandlerList
+}
+
+// Copy returns a copy of this handler's lists.
+func (h *Handlers) Copy() Handlers {
+ return Handlers{
+ Validate: h.Validate.copy(),
+ Build: h.Build.copy(),
+ Sign: h.Sign.copy(),
+ Send: h.Send.copy(),
+ ValidateResponse: h.ValidateResponse.copy(),
+ Unmarshal: h.Unmarshal.copy(),
+ UnmarshalError: h.UnmarshalError.copy(),
+ UnmarshalMeta: h.UnmarshalMeta.copy(),
+ Retry: h.Retry.copy(),
+ AfterRetry: h.AfterRetry.copy(),
+ Complete: h.Complete.copy(),
+ }
+}
+
+// Clear removes callback functions for all handlers
+func (h *Handlers) Clear() {
+ h.Validate.Clear()
+ h.Build.Clear()
+ h.Send.Clear()
+ h.Sign.Clear()
+ h.Unmarshal.Clear()
+ h.UnmarshalMeta.Clear()
+ h.UnmarshalError.Clear()
+ h.ValidateResponse.Clear()
+ h.Retry.Clear()
+ h.AfterRetry.Clear()
+ h.Complete.Clear()
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+ Index int
+ Handler NamedHandler
+ Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+ list []NamedHandler
+
+ // Called after each request handler in the list is called. If set
+ // and the func returns true the HandlerList will continue to iterate
+ // over the request handlers. If false is returned the HandlerList
+ // will stop iterating.
+ //
+ // Should be used if extra logic needs to be performed between each handler
+ // in the list. This can be used to terminate a list's iteration based on a
+ // condition, such as an error as with HandlerListStopOnError, or for
+ // logging as with HandlerListLogItem.
+ AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+ Name string
+ Fn func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+ n := HandlerList{
+ AfterEachFn: l.AfterEachFn,
+ }
+ if len(l.list) == 0 {
+ return n
+ }
+
+ n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
+ return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+ l.list = l.list[0:0]
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+ return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+ l.PushBackNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushBackNamed pushes named handler f to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+ if cap(l.list) == 0 {
+ l.list = make([]NamedHandler, 0, 5)
+ }
+ l.list = append(l.list, n)
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+ l.PushFrontNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushFrontNamed pushes named handler f to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+ if cap(l.list) == len(l.list) {
+ // Allocating new list required
+ l.list = append([]NamedHandler{n}, l.list...)
+ } else {
+ // Enough room to prepend into list.
+ l.list = append(l.list, NamedHandler{})
+ copy(l.list[1:], l.list)
+ l.list[0] = n
+ }
+}
+
+// Remove removes a NamedHandler n
+func (l *HandlerList) Remove(n NamedHandler) {
+ l.RemoveByName(n.Name)
+}
+
+// RemoveByName removes a NamedHandler by name.
+func (l *HandlerList) RemoveByName(name string) {
+ for i := 0; i < len(l.list); i++ {
+ m := l.list[i]
+ if m.Name == name {
+ // Shift array preventing creating new arrays
+ copy(l.list[i:], l.list[i+1:])
+ l.list[len(l.list)-1] = NamedHandler{}
+ l.list = l.list[:len(l.list)-1]
+
+ // decrement i so the next length check is correct
+ i--
+ }
+ }
+}
+
+// Run executes all handlers in the list with a given request object.
+func (l *HandlerList) Run(r *Request) {
+ for i, h := range l.list {
+ h.Fn(r)
+ item := HandlerListRunItem{
+ Index: i, Handler: h, Request: r,
+ }
+ if l.AfterEachFn != nil && !l.AfterEachFn(item) {
+ return
+ }
+ }
+}
+
+// HandlerListLogItem logs the request handler and the state of the
+// request's Error value. Always returns true to continue iterating
+// request handlers in a HandlerList.
+func HandlerListLogItem(item HandlerListRunItem) bool {
+ if item.Request.Config.Logger == nil {
+ return true
+ }
+ item.Request.Config.Logger.Log("DEBUG: RequestHandler",
+ item.Index, item.Handler.Name, item.Request.Error)
+
+ return true
+}
+
+// HandlerListStopOnError returns false to stop the HandlerList from iterating
+// over request handlers if Request.Error is not nil. Returns true otherwise
+// so iteration continues.
+func HandlerListStopOnError(item HandlerListRunItem) bool {
+ return item.Request.Error == nil
+}
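+
+// A hedged sketch of wiring these helpers together (the handler name and
+// body are assumed values):
+//
+//    var l HandlerList
+//    l.AfterEachFn = HandlerListStopOnError
+//    l.PushBackNamed(NamedHandler{
+//        Name: "example.inspect",
+//        Fn:   func(r *Request) { /* inspect or mutate r */ },
+//    })
+//
+// With AfterEachFn set, Run stops at the first handler that sets r.Error.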
+
+// WithAppendUserAgent will add a string to the user agent prefixed with a
+// single white space.
+func WithAppendUserAgent(s string) Option {
+ return func(r *Request) {
+ r.Handlers.Build.PushBack(func(r2 *Request) {
+ AddToUserAgent(r, s)
+ })
+ }
+}
+
+// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
+// header. If the extra parameters are provided they will be added as metadata to the
+// name/version pair resulting in the following format:
+// "name/version (extra0; extra1; ...)"
+// The user agent part will be concatenated with this current request's user agent string.
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
+ ua := fmt.Sprintf("%s/%s", name, version)
+ if len(extra) > 0 {
+ ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
+ }
+ return func(r *Request) {
+ AddToUserAgent(r, ua)
+ }
+}
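+
+// For example (assumed values), MakeAddToUserAgentHandler("terraform", "0.9",
+// "go1.8") produces a handler that appends the following to the request's
+// User-Agent header:
+//
+//    "terraform/0.9 (go1.8)"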
+
+// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
+// The input string will be concatenated with the current request's user agent string.
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
+ return func(r *Request) {
+ AddToUserAgent(r, s)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
new file mode 100644
index 00000000..79f79602
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
@@ -0,0 +1,24 @@
+package request
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+)
+
+func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
+ req := new(http.Request)
+ *req = *r
+ req.URL = &url.URL{}
+ *req.URL = *r.URL
+ req.Body = body
+
+ req.Header = http.Header{}
+ for k, v := range r.Header {
+ for _, vv := range v {
+ req.Header.Add(k, vv)
+ }
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
new file mode 100644
index 00000000..02f07f4a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
@@ -0,0 +1,58 @@
+package request
+
+import (
+ "io"
+ "sync"
+)
+
+// offsetReader is a thread-safe io.ReadCloser to prevent racing
+// with retrying requests
+type offsetReader struct {
+ buf io.ReadSeeker
+ lock sync.Mutex
+ closed bool
+}
+
+func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
+ reader := &offsetReader{}
+ buf.Seek(offset, 0)
+
+ reader.buf = buf
+ return reader
+}
+
+// Close will close the offset reader's access to the underlying
+// io.ReadSeeker.
+func (o *offsetReader) Close() error {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+ o.closed = true
+ return nil
+}
+
+// Read is a thread-safe read of the underlying io.ReadSeeker
+func (o *offsetReader) Read(p []byte) (int, error) {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+
+ if o.closed {
+ return 0, io.EOF
+ }
+
+ return o.buf.Read(p)
+}
+
+// Seek is a thread-safe seeking operation.
+func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+
+ return o.buf.Seek(offset, whence)
+}
+
+// CloseAndCopy closes the old reader and returns a new offsetReader wrapping
+// the same underlying buffer, seeked to the provided offset.
+func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
+ o.Close()
+ return newOffsetReader(o.buf, offset)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
new file mode 100644
index 00000000..4f4f1123
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -0,0 +1,575 @@
+package request
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+)
+
+const (
+ // ErrCodeSerialization is the serialization error code that is received
+ // during protocol unmarshaling.
+ ErrCodeSerialization = "SerializationError"
+
+ // ErrCodeRead is an error that is returned during HTTP reads.
+ ErrCodeRead = "ReadError"
+
+ // ErrCodeResponseTimeout is the connection timeout error that is received
+ // during body reads.
+ ErrCodeResponseTimeout = "ResponseTimeout"
+
+ // CanceledErrorCode is the error code that will be returned by an
+ // API request that was canceled. Requests given an aws.Context may
+ // return this error when canceled.
+ CanceledErrorCode = "RequestCanceled"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+ Config aws.Config
+ ClientInfo metadata.ClientInfo
+ Handlers Handlers
+
+ Retryer
+ Time time.Time
+ ExpireTime time.Duration
+ Operation *Operation
+ HTTPRequest *http.Request
+ HTTPResponse *http.Response
+ Body io.ReadSeeker
+ BodyStart int64 // offset from beginning of Body that the request body starts
+ Params interface{}
+ Error error
+ Data interface{}
+ RequestID string
+ RetryCount int
+ Retryable *bool
+ RetryDelay time.Duration
+ NotHoist bool
+ SignedHeaderVals http.Header
+ LastSignedAt time.Time
+ DisableFollowRedirects bool
+
+ context aws.Context
+
+ built bool
+
+ // Need to persist an intermediate body between the input Body and HTTP
+ // request body because the HTTP Client's transport can maintain a reference
+ // to the HTTP request's body after the client has returned. This value is
+ // safe to use concurrently and wraps the input Body for each HTTP request.
+ safeBody *offsetReader
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+ Name string
+ HTTPMethod string
+ HTTPPath string
+ *Paginator
+
+ BeforePresignFn func(r *Request) error
+}
+
+// New returns a new Request pointer for the service API
+// operation and parameters.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is pointer value to an object which the request's response
+// payload will be deserialized to.
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+ retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+ method := operation.HTTPMethod
+ if method == "" {
+ method = "POST"
+ }
+
+ httpReq, _ := http.NewRequest(method, "", nil)
+
+ var err error
+ httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
+ if err != nil {
+ httpReq.URL = &url.URL{}
+ err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+ }
+
+ r := &Request{
+ Config: cfg,
+ ClientInfo: clientInfo,
+ Handlers: handlers.Copy(),
+
+ Retryer: retryer,
+ Time: time.Now(),
+ ExpireTime: 0,
+ Operation: operation,
+ HTTPRequest: httpReq,
+ Body: nil,
+ Params: params,
+ Error: err,
+ Data: data,
+ }
+ r.SetBufferBody([]byte{})
+
+ return r
+}
+
+// An Option is a functional option that can augment or modify a request when
+// using a WithContext API operation method.
+type Option func(*Request)
+
+// WithGetResponseHeader builds a request Option which will retrieve a single
+// header value from the HTTP Response. If there are multiple values for the
+// header key use WithGetResponseHeaders instead to access the http.Header
+// map directly. The passed in val pointer must be non-nil.
+//
+// This Option can be used multiple times with a single API operation.
+//
+// var id2, versionID string
+// svc.PutObjectWithContext(ctx, params,
+// request.WithGetResponseHeader("x-amz-id-2", &id2),
+// request.WithGetResponseHeader("x-amz-version-id", &versionID),
+// )
+func WithGetResponseHeader(key string, val *string) Option {
+ return func(r *Request) {
+ r.Handlers.Complete.PushBack(func(req *Request) {
+ *val = req.HTTPResponse.Header.Get(key)
+ })
+ }
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+// var headers http.Header
+// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+ return func(r *Request) {
+ r.Handlers.Complete.PushBack(func(req *Request) {
+ *headers = req.HTTPResponse.Header
+ })
+ }
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+ return func(r *Request) {
+ r.Config.LogLevel = aws.LogLevel(l)
+ }
+}
+
+// ApplyOptions will apply each option to the request, calling them in the
+// order they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+ for _, opt := range opts {
+ opt(r)
+ }
+}
+
+// Context will always return a non-nil context. If the Request does not have
+// a context, aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+ if r.context != nil {
+ return r.context
+ }
+ return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+ if ctx == nil {
+ panic("context cannot be nil")
+ }
+ setRequestContext(r, ctx)
+}
+
+// WillRetry returns if the request can be retried.
+func (r *Request) WillRetry() bool {
+ return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+// ParamsFilled returns if the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or they are invalid.
+func (r *Request) ParamsFilled() bool {
+ return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+ return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+ r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+ r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+ r.Body = reader
+ r.ResetBody()
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails.
+func (r *Request) Presign(expireTime time.Duration) (string, error) {
+ r.ExpireTime = expireTime
+ r.NotHoist = false
+
+ if r.Operation.BeforePresignFn != nil {
+ r = r.copy()
+ err := r.Operation.BeforePresignFn(r)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ r.Sign()
+ if r.Error != nil {
+ return "", r.Error
+ }
+ return r.HTTPRequest.URL.String(), nil
+}
+
+// PresignRequest behaves just like Presign, but hoists all headers and signs
+// them. Also returns the signed headers back to the user.
+func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
+ r.ExpireTime = expireTime
+ r.NotHoist = true
+ r.Sign()
+ if r.Error != nil {
+ return "", nil, r.Error
+ }
+ return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+func debugLogReqError(r *Request, stage string, retrying bool, err error) {
+ if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+ return
+ }
+
+ retryStr := "not retrying"
+ if retrying {
+ retryStr = "will retry"
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+ stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+ if !r.built {
+ r.Handlers.Validate.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Request", false, r.Error)
+ return r.Error
+ }
+ r.Handlers.Build.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+ r.built = true
+ }
+
+ return r.Error
+}
+
+// Sign will sign the request returning error if errors are encountered.
+//
+// Send will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+ r.Build()
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+
+ r.Handlers.Sign.Run(r)
+ return r.Error
+}
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+func (r *Request) ResetBody() {
+ if r.safeBody != nil {
+ r.safeBody.Close()
+ }
+
+ r.safeBody = newOffsetReader(r.Body, r.BodyStart)
+
+ // Go 1.8 tightened and clarified the rules code needs to use when building
+ // requests with the http package. Go 1.8 removed the automatic detection
+ // of if the Request.Body was empty, or actually had bytes in it. The SDK
+ // always sets the Request.Body even if it is empty and should not actually
+ // be sent. This is incorrect.
+ //
+ // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http
+ // client that the request really should be sent without a body. The
+ // Request.Body cannot be set to nil, which is preferable, because the
+ // field is exported and could introduce nil pointer dereferences for users
+ // of the SDK if they used that field.
+ //
+ // Related golang/go#18257
+ l, err := computeBodyLength(r.Body)
+ if err != nil {
+ r.Error = awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
+ return
+ }
+
+ if l == 0 {
+ r.HTTPRequest.Body = noBodyReader
+ } else if l > 0 {
+ r.HTTPRequest.Body = r.safeBody
+ } else {
+ // Hack to prevent sending bodies for methods where the body
+ // should be ignored by the server. Sending bodies on these
+ // methods without an associated ContentLength will cause the
+ // request to socket timeout because the server does not handle
+ // Transfer-Encoding: chunked bodies for these methods.
+ //
+ // This would only happen if a aws.ReaderSeekerCloser was used with
+ // a io.Reader that was not also an io.Seeker.
+ switch r.Operation.HTTPMethod {
+ case "GET", "HEAD", "DELETE":
+ r.HTTPRequest.Body = noBodyReader
+ default:
+ r.HTTPRequest.Body = r.safeBody
+ }
+ }
+}
+
+// Attempts to compute the length of the body of the reader using the
+// io.Seeker interface. If the value is not seekable because it is a
+// ReaderSeekerCloser without an underlying Seeker, -1 will be returned.
+// If no error occurs the length of the body will be returned.
+func computeBodyLength(r io.ReadSeeker) (int64, error) {
+ seekable := true
+ // Determine if the seeker is actually seekable. ReaderSeekerCloser
+ // hides the fact that an io.Reader might not actually be seekable.
+ switch v := r.(type) {
+ case aws.ReaderSeekerCloser:
+ seekable = v.IsSeeker()
+ case *aws.ReaderSeekerCloser:
+ seekable = v.IsSeeker()
+ }
+ if !seekable {
+ return -1, nil
+ }
+
+ curOffset, err := r.Seek(0, 1)
+ if err != nil {
+ return 0, err
+ }
+
+ endOffset, err := r.Seek(0, 2)
+ if err != nil {
+ return 0, err
+ }
+
+ _, err = r.Seek(curOffset, 0)
+ if err != nil {
+ return 0, err
+ }
+
+ return endOffset - curOffset, nil
+}
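+
+// The whence values 1 (seek relative to the current offset) and 2 (seek
+// relative to the end) let the length be computed as endOffset-curOffset
+// without losing the reader's position: for example, a 100-byte body already
+// read to offset 10 yields a remaining length of 90, and the final Seek
+// restores the offset to 10.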
+
+// GetBody will return an io.ReadSeeker of the Request's underlying
+// input body with a concurrency safe wrapper.
+func (r *Request) GetBody() io.ReadSeeker {
+ return r.safeBody
+}
+
+// Send will send the request returning error if errors are encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+//
+// Canceling a request is non-deterministic. If a request has been canceled,
+// then the transport will choose, randomly, one of the state channels during
+// reads or getting the connection.
+//
+// readLoop() and getConn(req *Request, cm connectMethod)
+// https://github.com/golang/go/blob/master/src/net/http/transport.go
+//
+// Send will not close the request.Request's body.
+func (r *Request) Send() error {
+ defer func() {
+ // Regardless of success or failure of the request trigger the Complete
+ // request handlers.
+ r.Handlers.Complete.Run(r)
+ }()
+
+ for {
+ if aws.BoolValue(r.Retryable) {
+ if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+ r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
+ }
+
+ // The previous http.Request will have a reference to the r.Body
+ // and the HTTP Client's Transport may still be reading from
+ // the request's body even though the Client's Do returned.
+ r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
+ r.ResetBody()
+
+ // Closing response body to ensure that no response body is leaked
+ // between retry attempts.
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
+ r.HTTPResponse.Body.Close()
+ }
+ }
+
+ r.Sign()
+ if r.Error != nil {
+ return r.Error
+ }
+
+ r.Retryable = nil
+
+ r.Handlers.Send.Run(r)
+ if r.Error != nil {
+ if !shouldRetryCancel(r) {
+ return r.Error
+ }
+
+ err := r.Error
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Send Request", false, r.Error)
+ return r.Error
+ }
+ debugLogReqError(r, "Send Request", true, err)
+ continue
+ }
+ r.Handlers.UnmarshalMeta.Run(r)
+ r.Handlers.ValidateResponse.Run(r)
+ if r.Error != nil {
+ err := r.Error
+ r.Handlers.UnmarshalError.Run(r)
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Response", false, r.Error)
+ return r.Error
+ }
+ debugLogReqError(r, "Validate Response", true, err)
+ continue
+ }
+
+ r.Handlers.Unmarshal.Run(r)
+ if r.Error != nil {
+ err := r.Error
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Unmarshal Response", false, r.Error)
+ return r.Error
+ }
+ debugLogReqError(r, "Unmarshal Response", true, err)
+ continue
+ }
+
+ break
+ }
+
+ return nil
+}
+
+// copy will copy a request which will allow for local manipulation of the
+// request.
+func (r *Request) copy() *Request {
+ req := &Request{}
+ *req = *r
+ req.Handlers = r.Handlers.Copy()
+ op := *r.Operation
+ req.Operation = &op
+ return req
+}
+
+// AddToUserAgent adds the string to the end of the request's current user agent.
+func AddToUserAgent(r *Request, s string) {
+ curUA := r.HTTPRequest.Header.Get("User-Agent")
+ if len(curUA) > 0 {
+ s = curUA + " " + s
+ }
+ r.HTTPRequest.Header.Set("User-Agent", s)
+}
+
+func shouldRetryCancel(r *Request) bool {
+ awsErr, ok := r.Error.(awserr.Error)
+ timeoutErr := false
+ errStr := r.Error.Error()
+ if ok {
+ if awsErr.Code() == CanceledErrorCode {
+ return false
+ }
+ err := awsErr.OrigErr()
+ netErr, netOK := err.(net.Error)
+ timeoutErr = netOK && netErr.Temporary()
+ if urlErr, ok := err.(*url.Error); !timeoutErr && ok {
+ errStr = urlErr.Err.Error()
+ }
+ }
+
+ // There can be two types of canceled errors here.
+ // The first being a net.Error and the other being an error.
+ // If the request was timed out, we want to continue the retry
+ // process. Otherwise, return the canceled error.
+ return timeoutErr ||
+ (errStr != "net/http: request canceled" &&
+ errStr != "net/http: request canceled while waiting for connection")
+
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
new file mode 100644
index 00000000..1323af90
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
@@ -0,0 +1,21 @@
+// +build !go1.8
+
+package request
+
+import "io"
+
+// NoBody is an io.ReadCloser with no bytes. Read always returns EOF
+// and Close always returns nil. It can be used in an outgoing client
+// request to explicitly signal that a request has zero bytes.
+// An alternative, however, is to simply set Request.Body to nil.
+//
+// Copy of Go 1.8 NoBody type from net/http/http.go
+type noBody struct{}
+
+func (noBody) Read([]byte) (int, error) { return 0, io.EOF }
+func (noBody) Close() error { return nil }
+func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
+
+// noBodyReader is an empty reader that will trigger the Go HTTP client to not
+// include any body in the HTTP request.
+var noBodyReader = noBody{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
new file mode 100644
index 00000000..8b963f4d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
@@ -0,0 +1,9 @@
+// +build go1.8
+
+package request
+
+import "net/http"
+
+// noBodyReader is an http.NoBody reader instructing the Go HTTP client to not
+// include any body in the HTTP request.
+var noBodyReader = http.NoBody
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
new file mode 100644
index 00000000..a7365cd1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
@@ -0,0 +1,14 @@
+// +build go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+ r.context = ctx
+ r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
new file mode 100644
index 00000000..307fa070
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
@@ -0,0 +1,14 @@
+// +build !go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+ r.context = ctx
+ r.HTTPRequest.Cancel = ctx.Done()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
new file mode 100644
index 00000000..59de6736
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -0,0 +1,236 @@
+package request
+
+import (
+ "reflect"
+ "sync/atomic"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// A Pagination provides pagination of SDK API operations which are paginatable.
+// Generally you should not use this type directly, but use the "Pages" API
+// operation methods to automatically perform pagination for you, such as the
+// "S3.ListObjectsPages" and "S3.ListObjectsPagesWithContext" methods.
+//
+// Pagination differs from a Paginator type in that pagination is the type that
+// does the pagination between API operations, and Paginator defines the
+// configuration that will be used per page request.
+//
+// cont := true
+// for p.Next() && cont {
+// data := p.Page().(*s3.ListObjectsOutput)
+// // process the page's data
+// }
+// return p.Err()
+//
+// See service client API operation Pages methods for examples how the SDK will
+// use the Pagination type.
+type Pagination struct {
+ // Function to return a Request value for each pagination request.
+ // Any configuration or handlers that need to be applied to the request
+ // prior to getting the next page should be done here before the request
+ // is returned.
+ //
+ // NewRequest should always be built from the same API operation. It is
+ // undefined if different API operations are returned on subsequent calls.
+ NewRequest func() (*Request, error)
+
+ started bool
+ nextTokens []interface{}
+
+ err error
+ curPage interface{}
+}
+
+// HasNextPage will return true if Pagination is able to determine that the API
+// operation has additional pages. False will be returned if there are no more
+// pages remaining.
+//
+// Will always return true if Next has not been called yet.
+func (p *Pagination) HasNextPage() bool {
+ return !(p.started && len(p.nextTokens) == 0)
+}
+
+// Err returns the error Pagination encountered when retrieving the next page.
+func (p *Pagination) Err() error {
+ return p.err
+}
+
+// Page returns the current page. Page should only be called after a successful
+// call to Next. It is undefined what Page will return if Page is called after
+// Next returns false.
+func (p *Pagination) Page() interface{} {
+ return p.curPage
+}
+
+// Next will attempt to retrieve the next page for the API operation. When a page
+// is retrieved true will be returned. If the page cannot be retrieved, or there
+// are no more pages, false will be returned.
+//
+// Use the Page method to retrieve the current page data. The data will need
+// to be cast to the API operation's output type.
+//
+// Use the Err method to determine if an error occurred when Next returns false.
+func (p *Pagination) Next() bool {
+ if !p.HasNextPage() {
+ return false
+ }
+
+ req, err := p.NewRequest()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ if p.started {
+ for i, intok := range req.Operation.InputTokens {
+ awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i])
+ }
+ }
+ p.started = true
+
+ err = req.Send()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ p.nextTokens = req.nextPageTokens()
+ p.curPage = req.Data
+
+ return true
+}
+
+// A Paginator is the configuration data that defines how an API operation
+// should be paginated. This type is used by the API service models to define
+// the generated pagination config for service APIs.
+//
+// The Pagination type is what provides iterating between pages of an API. The
+// Paginator type is only used to store the token metadata the SDK should use
+// for performing pagination.
+type Paginator struct {
+ InputTokens []string
+ OutputTokens []string
+ LimitToken string
+ TruncationToken string
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of data.
+func (r *Request) nextPageTokens() []interface{} {
+ if r.Operation.Paginator == nil {
+ return nil
+ }
+ if r.Operation.TruncationToken != "" {
+ tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
+ if len(tr) == 0 {
+ return nil
+ }
+
+ switch v := tr[0].(type) {
+ case *bool:
+ if !aws.BoolValue(v) {
+ return nil
+ }
+ case bool:
+ if !v {
+ return nil
+ }
+ }
+ }
+
+ tokens := []interface{}{}
+ tokenAdded := false
+ for _, outToken := range r.Operation.OutputTokens {
+ v, _ := awsutil.ValuesAtPath(r.Data, outToken)
+ if len(v) > 0 {
+ tokens = append(tokens, v[0])
+ tokenAdded = true
+ } else {
+ tokens = append(tokens, nil)
+ }
+ }
+ if !tokenAdded {
+ return nil
+ }
+
+ return tokens
+}
+
+// Ensure a deprecated item is only logged once instead of each time it's used.
+func logDeprecatedf(logger aws.Logger, flag *int32, msg string) {
+ if logger == nil {
+ return
+ }
+ if atomic.CompareAndSwapInt32(flag, 0, 1) {
+ logger.Log(msg)
+ }
+}
+
+var (
+ logDeprecatedHasNextPage int32
+ logDeprecatedNextPage int32
+ logDeprecatedEachPage int32
+)
+
+// HasNextPage returns true if this request has more pages of data available.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
+func (r *Request) HasNextPage() bool {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage,
+ "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ return len(r.nextPageTokens()) > 0
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
+func (r *Request) NextPage() *Request {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage,
+ "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ tokens := r.nextPageTokens()
+ if len(tokens) == 0 {
+ return nil
+ }
+
+ data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+ nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+ for i, intok := range nr.Operation.InputTokens {
+ awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
+ }
+ return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+// func(page *T, lastPage bool) bool {
+// return true // return false to stop iterating
+// }
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. The return value of this function should
+// return true to keep iterating or false to stop.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage,
+ "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ for page := r; page != nil; page = page.NextPage() {
+ if err := page.Send(); err != nil {
+ return err
+ }
+ if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
+ return page.Error
+ }
+ }
+
+ return nil
+}
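+
+// A minimal sketch of driving the Pagination type by hand, assuming a
+// hypothetical service client svc with a ListThings operation; real code
+// should prefer the generated "Pages" methods on the service clients:
+//
+//	p := request.Pagination{
+//		NewRequest: func() (*request.Request, error) {
+//			req, _ := svc.ListThingsRequest(&ListThingsInput{})
+//			return req, nil
+//		},
+//	}
+//	for p.Next() {
+//		page := p.Page().(*ListThingsOutput)
+//		// process the page's data
+//	}
+//	if err := p.Err(); err != nil {
+//		// handle the pagination error
+//	}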
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
new file mode 100644
index 00000000..7af81de2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -0,0 +1,154 @@
+package request
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Retryer is an interface to control retry logic for a given service.
+// The default implementation used by most services is the service.DefaultRetryer
+// structure, which contains basic retry logic using exponential backoff.
+type Retryer interface {
+ RetryRules(*Request) time.Duration
+ ShouldRetry(*Request) bool
+ MaxRetries() int
+}
+
+// WithRetryer sets a config Retryer value to the given Config returning it
+// for chaining.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+ cfg.Retryer = retryer
+ return cfg
+}
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
+var retryableCodes = map[string]struct{}{
+ "RequestError": {},
+ "RequestTimeout": {},
+ ErrCodeResponseTimeout: {},
+ "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
+}
+
+var throttleCodes = map[string]struct{}{
+ "ProvisionedThroughputExceededException": {},
+ "Throttling": {},
+ "ThrottlingException": {},
+ "RequestLimitExceeded": {},
+ "RequestThrottled": {},
+ "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once
+ "TooManyRequestsException": {}, // Lambda functions
+ "PriorRequestNotComplete": {}, // Route53
+}
+
+// credsExpiredCodes is a collection of error codes which signify the credentials
+// need to be refreshed. Expired tokens require refreshing of credentials, and
+// resigning before the request can be retried.
+var credsExpiredCodes = map[string]struct{}{
+ "ExpiredToken": {},
+ "ExpiredTokenException": {},
+ "RequestExpired": {}, // EC2 Only
+}
+
+func isCodeThrottle(code string) bool {
+ _, ok := throttleCodes[code]
+ return ok
+}
+
+func isCodeRetryable(code string) bool {
+ if _, ok := retryableCodes[code]; ok {
+ return true
+ }
+
+ return isCodeExpiredCreds(code)
+}
+
+func isCodeExpiredCreds(code string) bool {
+ _, ok := credsExpiredCodes[code]
+ return ok
+}
+
+var validParentCodes = map[string]struct{}{
+ ErrCodeSerialization: {},
+ ErrCodeRead: {},
+}
+
+func isNestedErrorRetryable(parentErr awserr.Error) bool {
+ if parentErr == nil {
+ return false
+ }
+
+ if _, ok := validParentCodes[parentErr.Code()]; !ok {
+ return false
+ }
+
+ err := parentErr.OrigErr()
+ if err == nil {
+ return false
+ }
+
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeRetryable(aerr.Code())
+ }
+
+ return isErrConnectionReset(err)
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if error is nil.
+func IsErrorRetryable(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr)
+ }
+ }
+ return false
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if error is nil.
+func IsErrorThrottle(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeThrottle(aerr.Code())
+ }
+ }
+ return false
+}
+
+// IsErrorExpiredCreds returns whether the error code is a credential expiry error.
+// Returns false if error is nil.
+func IsErrorExpiredCreds(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeExpiredCreds(aerr.Code())
+ }
+ }
+ return false
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorRetryable
+func (r *Request) IsErrorRetryable() bool {
+ return IsErrorRetryable(r.Error)
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorThrottle
+func (r *Request) IsErrorThrottle() bool {
+ return IsErrorThrottle(r.Error)
+}
+
+// IsErrorExpired returns whether the error code is a credential expiry error.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorExpiredCreds
+func (r *Request) IsErrorExpired() bool {
+ return IsErrorExpiredCreds(r.Error)
+}
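+
+// A minimal sketch of a custom Retryer satisfying the interface above;
+// fixedRetryer is an illustrative name, not part of this package:
+//
+//	type fixedRetryer struct{}
+//
+//	func (fixedRetryer) MaxRetries() int                           { return 3 }
+//	func (fixedRetryer) RetryRules(*request.Request) time.Duration { return time.Second }
+//	func (fixedRetryer) ShouldRetry(r *request.Request) bool       { return r.IsErrorRetryable() }
+//
+//	cfg := request.WithRetryer(aws.NewConfig(), fixedRetryer{})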
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
new file mode 100644
index 00000000..09a44eb9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
@@ -0,0 +1,94 @@
+package request
+
+import (
+ "io"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var timeoutErr = awserr.New(
+ ErrCodeResponseTimeout,
+ "read on body has reached the timeout limit",
+ nil,
+)
+
+type readResult struct {
+ n int
+ err error
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// We will return an ErrCodeResponseTimeout error if a timeout occurs.
+type timeoutReadCloser struct {
+ reader io.ReadCloser
+ duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whoever completes first
+// will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+ timer := time.NewTimer(r.duration)
+ c := make(chan readResult, 1)
+
+ go func() {
+ n, err := r.reader.Read(b)
+ timer.Stop()
+ c <- readResult{n: n, err: err}
+ }()
+
+ select {
+ case data := <-c:
+ return data.n, data.err
+ case <-timer.C:
+ return 0, timeoutErr
+ }
+}
+
+func (r *timeoutReadCloser) Close() error {
+ return r.reader.Close()
+}
+
+const (
+ // HandlerResponseTimeout is what we use to signify the name of the
+ // response timeout handler.
+ HandlerResponseTimeout = "ResponseTimeoutHandler"
+)
+
+// adaptToResponseTimeoutError is a handler that will replace any top level error
+// with the wrapped ErrCodeResponseTimeout error, if the nested error has that code.
+func adaptToResponseTimeoutError(req *Request) {
+ if err, ok := req.Error.(awserr.Error); ok {
+ aerr, ok := err.OrigErr().(awserr.Error)
+ if ok && aerr.Code() == ErrCodeResponseTimeout {
+ req.Error = aerr
+ }
+ }
+}
+
+// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
+// This will allow for per-read timeouts. If a timeout occurs, we will return the
+// ErrCodeResponseTimeout error.
+//
+//     svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30*time.Second))
+func WithResponseReadTimeout(duration time.Duration) Option {
+ return func(r *Request) {
+ var timeoutHandler = NamedHandler{
+ HandlerResponseTimeout,
+ func(req *Request) {
+ req.HTTPResponse.Body = &timeoutReadCloser{
+ reader: req.HTTPResponse.Body,
+ duration: duration,
+ }
+ }}
+
+ // Remove any existing handler so we do not stomp on a previously set duration.
+ r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
+ r.Handlers.Send.PushBackNamed(timeoutHandler)
+
+ r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
+ r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
+ }
+}
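+
+// A sketch of applying the per-read response timeout to a single call,
+// assuming an S3-style client svc:
+//
+//	_, err := svc.PutObjectWithContext(ctx, params,
+//		request.WithResponseReadTimeout(30*time.Second))
+//	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == request.ErrCodeResponseTimeout {
+//		// the body read exceeded the 30 second per-read limit
+//	}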
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
new file mode 100644
index 00000000..2520286b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -0,0 +1,234 @@
+package request
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+ // InvalidParameterErrCode is the error code for invalid parameters errors
+ InvalidParameterErrCode = "InvalidParameter"
+ // ParamRequiredErrCode is the error code for required parameter errors
+ ParamRequiredErrCode = "ParamRequiredError"
+ // ParamMinValueErrCode is the error code for fields with too low of a
+ // number value.
+ ParamMinValueErrCode = "ParamMinValueError"
+ // ParamMinLenErrCode is the error code for fields without enough elements.
+ ParamMinLenErrCode = "ParamMinLenError"
+)
+
+// Validator provides a way for types to perform validation logic on their
+// input values that external code can use to determine if a type's values
+// are valid.
+type Validator interface {
+ Validate() error
+}
+
+// An ErrInvalidParams provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type ErrInvalidParams struct {
+ // Context is the base context of the invalid parameter group.
+ Context string
+ errs []ErrInvalidParam
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
+ err.SetContext(e.Context)
+ e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another ErrInvalidParams
+// value into this collection. The nested errors will have their nested context
+// updated and base context to reflect the merging.
+//
+// Use for nested validations errors.
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
+ for _, err := range nested.errs {
+ err.SetContext(e.Context)
+ err.AddNestedContext(nestedCtx)
+ e.errs = append(e.errs, err)
+ }
+}
+
+// Len returns the number of invalid parameter errors
+func (e ErrInvalidParams) Len() int {
+ return len(e.errs)
+}
+
+// Code returns the code of the error
+func (e ErrInvalidParams) Code() string {
+ return InvalidParameterErrCode
+}
+
+// Message returns the message of the error
+func (e ErrInvalidParams) Message() string {
+ return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e ErrInvalidParams) Error() string {
+ w := &bytes.Buffer{}
+ fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
+
+ for _, err := range e.errs {
+ fmt.Fprintf(w, "- %s\n", err.Message())
+ }
+
+ return w.String()
+}
+
+// OrigErr returns the invalid parameters as an awserr.BatchedErrors value
+func (e ErrInvalidParams) OrigErr() error {
+ return awserr.NewBatchError(
+ InvalidParameterErrCode, e.Message(), e.OrigErrs())
+}
+
+// OrigErrs returns a slice of the invalid parameters
+func (e ErrInvalidParams) OrigErrs() []error {
+ errs := make([]error, len(e.errs))
+ for i := 0; i < len(errs); i++ {
+ errs[i] = e.errs[i]
+ }
+
+ return errs
+}
+
+// An ErrInvalidParam represents an invalid parameter error type.
+type ErrInvalidParam interface {
+ awserr.Error
+
+ // Field name the error occurred on.
+ Field() string
+
+ // SetContext updates the context of the error.
+ SetContext(string)
+
+ // AddNestedContext updates the error's context to include a nested level.
+ AddNestedContext(string)
+}
+
+type errInvalidParam struct {
+ context string
+ nestedContext string
+ field string
+ code string
+ msg string
+}
+
+// Code returns the error code for the type of invalid parameter.
+func (e *errInvalidParam) Code() string {
+ return e.code
+}
+
+// Message returns the reason the parameter was invalid, and its context.
+func (e *errInvalidParam) Message() string {
+ return fmt.Sprintf("%s, %s.", e.msg, e.Field())
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e *errInvalidParam) Error() string {
+ return fmt.Sprintf("%s: %s", e.code, e.Message())
+}
+
+// OrigErr returns nil. Implemented to satisfy the awserr.Error interface.
+func (e *errInvalidParam) OrigErr() error {
+ return nil
+}
+
+// Field returns the field and context in which the error occurred.
+func (e *errInvalidParam) Field() string {
+ field := e.context
+ if len(field) > 0 {
+ field += "."
+ }
+ if len(e.nestedContext) > 0 {
+ field += fmt.Sprintf("%s.", e.nestedContext)
+ }
+ field += e.field
+
+ return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+ e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+ if len(e.nestedContext) == 0 {
+ e.nestedContext = ctx
+ } else {
+ e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+ }
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+ errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ErrParamRequired {
+ return &ErrParamRequired{
+ errInvalidParam{
+ code: ParamRequiredErrCode,
+ field: field,
+ msg: "missing required field",
+ },
+ }
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+ errInvalidParam
+ min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+ return &ErrParamMinValue{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinValueErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field value of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+ return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
+type ErrParamMinLen struct {
+ errInvalidParam
+ min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+ return &ErrParamMinLen{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinLenErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field size of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+ return e.min
+}
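+
+// A sketch of how a generated Validate method typically builds an
+// ErrInvalidParams value, assuming a hypothetical PutThingInput type:
+//
+//	func (s *PutThingInput) Validate() error {
+//		invalidParams := request.ErrInvalidParams{Context: "PutThingInput"}
+//		if s.Name == nil {
+//			invalidParams.Add(request.NewErrParamRequired("Name"))
+//		}
+//		if s.Name != nil && len(*s.Name) < 1 {
+//			invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+//		}
+//		if invalidParams.Len() > 0 {
+//			return invalidParams
+//		}
+//		return nil
+//	}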
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
new file mode 100644
index 00000000..854b0854
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -0,0 +1,287 @@
+package request
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
+// the waiter's max attempts have been exhausted.
+const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
+
+// A WaiterOption is a function that will update the Waiter value's fields to
+// configure the waiter.
+type WaiterOption func(*Waiter)
+
+// WithWaiterMaxAttempts returns a waiter option which will set the maximum
+// number of times the waiter will attempt to check the resource for the
+// target state.
+func WithWaiterMaxAttempts(max int) WaiterOption {
+ return func(w *Waiter) {
+ w.MaxAttempts = max
+ }
+}
+
+// WaiterDelay will return a delay the waiter should pause between attempts to
+// check the resource state. The passed in attempt is the number of times the
+// Waiter has checked the resource state so far.
+type WaiterDelay func(attempt int) time.Duration
+
+// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
+// delay the waiter should use between attempts. It ignores the number of
+// attempts made.
+func ConstantWaiterDelay(delay time.Duration) WaiterDelay {
+ return func(attempt int) time.Duration {
+ return delay
+ }
+}
+
+// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
+func WithWaiterDelay(delayer WaiterDelay) WaiterOption {
+ return func(w *Waiter) {
+ w.Delay = delayer
+ }
+}
+
+// WithWaiterLogger returns a waiter option to set the logger a waiter
+// should use to log warnings and errors to.
+func WithWaiterLogger(logger aws.Logger) WaiterOption {
+ return func(w *Waiter) {
+ w.Logger = logger
+ }
+}
+
+// WithWaiterRequestOptions returns a waiter option setting the request
+// options for each request the waiter makes. Appends to the waiter's request
+// options already set.
+func WithWaiterRequestOptions(opts ...Option) WaiterOption {
+ return func(w *Waiter) {
+ w.RequestOptions = append(w.RequestOptions, opts...)
+ }
+}
+
+// A Waiter provides the functionality to perform a blocking call which will
+// wait for a resource state to be satisfied by a service.
+//
+// This type should not be used directly. The API operations provided in the
+// service packages prefixed with "WaitUntil" should be used instead.
+type Waiter struct {
+ Name string
+ Acceptors []WaiterAcceptor
+ Logger aws.Logger
+
+ MaxAttempts int
+ Delay WaiterDelay
+
+ RequestOptions []Option
+ NewRequest func([]Option) (*Request, error)
+}
+
+// ApplyOptions updates the waiter with the list of waiter options provided.
+func (w *Waiter) ApplyOptions(opts ...WaiterOption) {
+ for _, fn := range opts {
+ fn(w)
+ }
+}
+
+// A WaiterState is a state the waiter uses, based on WaiterAcceptor definitions,
+// to identify if the resource state the waiter is waiting on has occurred.
+type WaiterState int
+
+// String returns the string representation of the waiter state.
+func (s WaiterState) String() string {
+ switch s {
+ case SuccessWaiterState:
+ return "success"
+ case FailureWaiterState:
+ return "failure"
+ case RetryWaiterState:
+ return "retry"
+ default:
+ return "unknown waiter state"
+ }
+}
+
+// States the waiter acceptors will use to identify target resource states.
+const (
+ SuccessWaiterState WaiterState = iota // waiter successful
+ FailureWaiterState // waiter failed
+ RetryWaiterState // waiter needs to be retried
+)
+
+// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor
+// definition's Expected attribute.
+type WaiterMatchMode int
+
+// Modes the waiter will use when inspecting API response to identify target
+// resource states.
+const (
+ PathAllWaiterMatch WaiterMatchMode = iota // match on all paths
+ PathWaiterMatch // match on specific path
+ PathAnyWaiterMatch // match on any path
+ PathListWaiterMatch // match on list of paths
+ StatusWaiterMatch // match on status code
+ ErrorWaiterMatch // match on error
+)
+
+// String returns the string representation of the waiter match mode.
+func (m WaiterMatchMode) String() string {
+ switch m {
+ case PathAllWaiterMatch:
+ return "pathAll"
+ case PathWaiterMatch:
+ return "path"
+ case PathAnyWaiterMatch:
+ return "pathAny"
+ case PathListWaiterMatch:
+ return "pathList"
+ case StatusWaiterMatch:
+ return "status"
+ case ErrorWaiterMatch:
+ return "error"
+ default:
+ return "unknown waiter match mode"
+ }
+}
+
+// WaitWithContext will make requests for the API operation using NewRequest to
+// build API requests. The request's response will be compared against the
+// Waiter's Acceptors to determine the successful state of the resource the
+// waiter is inspecting.
+//
+// The passed in context must not be nil. If it is nil a panic will occur. The
+// Context will be used to cancel the waiter's pending requests and retry delays.
+// Use aws.BackgroundContext if no context is available.
+//
+// The waiter will continue until the target state defined by the Acceptors is
+// reached, or the max attempts expire.
+//
+// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's
+// retryer ShouldRetry returns false. This normally will happen when the max
+// wait attempts expires.
+func (w Waiter) WaitWithContext(ctx aws.Context) error {
+ for attempt := 1; ; attempt++ {
+ req, err := w.NewRequest(w.RequestOptions)
+ if err != nil {
+ waiterLogf(w.Logger, "unable to create request %v", err)
+ return err
+ }
+ req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter"))
+ err = req.Send()
+
+ // See if any of the acceptors match the request's response, or error
+ for _, a := range w.Acceptors {
+ if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched {
+ return matchErr
+ }
+ }
+
+ // The Waiter should only check the resource state MaxAttempts times
+ // This is here instead of in the for loop above to prevent delaying
+ // unnecessarily when the waiter will not retry.
+ if attempt == w.MaxAttempts {
+ break
+ }
+
+ // Delay to wait before inspecting the resource again
+ delay := w.Delay(attempt)
+ if sleepFn := req.Config.SleepDelay; sleepFn != nil {
+ // Support SleepDelay for backwards compatibility and testing
+ sleepFn(delay)
+ } else if err := aws.SleepWithContext(ctx, delay); err != nil {
+ return awserr.New(CanceledErrorCode, "waiter context canceled", err)
+ }
+ }
+
+ return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil)
+}
+
+// A WaiterAcceptor provides the information needed to wait for an API operation
+// to complete.
+type WaiterAcceptor struct {
+ State WaiterState
+ Matcher WaiterMatchMode
+ Argument string
+ Expected interface{}
+}
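+
+// An illustrative acceptor treating an HTTP 200 response as the waiter's
+// success state:
+//
+//	acc := request.WaiterAcceptor{
+//		State:    request.SuccessWaiterState,
+//		Matcher:  request.StatusWaiterMatch,
+//		Expected: 200,
+//	}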
+
+// match returns whether the acceptor found a match with the passed in request
+// or error. True is returned if the acceptor made a match, and an error is
+// returned if there was an error attempting to perform the match.
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) {
+ result := false
+ var vals []interface{}
+
+ switch a.Matcher {
+ case PathAllWaiterMatch, PathWaiterMatch:
+ // Require all matches to be equal for result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ if len(vals) == 0 {
+ break
+ }
+ result = true
+ for _, val := range vals {
+ if !awsutil.DeepEqual(val, a.Expected) {
+ result = false
+ break
+ }
+ }
+ case PathAnyWaiterMatch:
+ // Only a single match needs to equal for the result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ for _, val := range vals {
+ if awsutil.DeepEqual(val, a.Expected) {
+ result = true
+ break
+ }
+ }
+ case PathListWaiterMatch:
+ // ignored matcher
+ case StatusWaiterMatch:
+ s := a.Expected.(int)
+ result = s == req.HTTPResponse.StatusCode
+ case ErrorWaiterMatch:
+ if aerr, ok := err.(awserr.Error); ok {
+ result = aerr.Code() == a.Expected.(string)
+ }
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
+ name, a.Matcher)
+ }
+
+ if !result {
+ // If there was no matching result found there is nothing more to do
+ // for this response, retry the request.
+ return false, nil
+ }
+
+ switch a.State {
+ case SuccessWaiterState:
+ // waiter completed
+ return true, nil
+ case FailureWaiterState:
+ // Waiter failure state triggered
+ return true, awserr.New(WaiterResourceNotReadyErrorCode,
+ "failed waiting for successful resource state", err)
+ case RetryWaiterState:
+ // clear the error and retry the operation
+ return false, nil
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s",
+ name, a.State)
+ return false, nil
+ }
+}
+
+func waiterLogf(logger aws.Logger, msg string, args ...interface{}) {
+ if logger != nil {
+ logger.Log(fmt.Sprintf(msg, args...))
+ }
+}
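+
+// A sketch of tuning a waiter through the options above;
+// WaitUntilThingExistsWithContext stands in for a generated service waiter
+// method:
+//
+//	err := svc.WaitUntilThingExistsWithContext(ctx, input,
+//		request.WithWaiterMaxAttempts(10),
+//		request.WithWaiterDelay(request.ConstantWaiterDelay(5*time.Second)),
+//	)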
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
new file mode 100644
index 00000000..2fe35e74
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -0,0 +1,274 @@
+/*
+Package session provides configuration for the SDK's service clients.
+
+Sessions can be shared across all service clients that share the same base
+configuration. The Session is built from the SDK's default configuration and
+request handlers.
+
+Sessions should be cached when possible, because creating a new Session will
+load all configuration values from the environment and config files each time
+the Session is created. Sharing the Session value across all of your service
+clients will ensure the configuration is loaded the fewest number of times possible.
+
+Concurrency
+
+Sessions are safe to use concurrently as long as the Session is not being
+modified. The SDK will not modify the Session once the Session has been created.
+Creating service clients concurrently from a shared Session is safe.
+
+Sessions from Shared Config
+
+Sessions can be created using NewSession, which will only load the
+additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
+Alternatively you can explicitly create a Session with shared config enabled.
+To do this you can use NewSessionWithOptions to configure how the Session will
+be created. Using the NewSessionWithOptions with SharedConfigState set to
+SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
+environment variable was set.
+
+Creating Sessions
+
+When creating Sessions optional aws.Config values can be passed in that will
+override the default, or loaded config values the Session is being created
+with. This allows you to provide additional, or case based, configuration
+as needed.
+
+By default NewSession will only load credentials from the shared credentials
+file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
+set to a truthy value the Session will be created from the configuration
+values from the shared config (~/.aws/config) and shared credentials
+(~/.aws/credentials) files. See the section Sessions from Shared Config for
+more information.
+
+Create a Session with the default config and request handlers, with credentials,
+region, and profile loaded from the environment and shared config automatically.
+Requires the AWS_PROFILE environment variable to be set, or "default" is used.
+
+ // Create Session
+ sess := session.Must(session.NewSession())
+
+ // Create a Session with a custom region
+ sess := session.Must(session.NewSession(&aws.Config{
+ Region: aws.String("us-east-1"),
+ }))
+
+ // Create an S3 client instance from a session
+ sess := session.Must(session.NewSession())
+
+ svc := s3.New(sess)
+
+Create Session With Option Overrides
+
+In addition to NewSession, Sessions can be created using NewSessionWithOptions.
+This func allows you to control and override how the Session will be created
+through code instead of being driven by environment variables only.
+
+Use NewSessionWithOptions when you want to provide the config profile, or
+override the shared config state (AWS_SDK_LOAD_CONFIG).
+
+ // Equivalent to session.NewSession()
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ // Options
+ }))
+
+ // Specify profile to load for the session's config
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ Profile: "profile_name",
+ }))
+
+ // Specify profile for config and region for requests
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ Config: aws.Config{Region: aws.String("us-east-1")},
+ Profile: "profile_name",
+ }))
+
+ // Force enable Shared Config support
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ SharedConfigState: session.SharedConfigEnable,
+ }))
+
+Adding Handlers
+
+You can add handlers to a session for processing HTTP requests. All service
+clients that use the session inherit the handlers. For example, the following
+handler logs every request and its payload made by a service client:
+
+ // Create a session, and add additional handlers for all service
+ // clients created with the Session to inherit. Adds logging handler.
+ sess := session.Must(session.NewSession())
+
+ sess.Handlers.Send.PushFront(func(r *request.Request) {
+ // Log every request made and its payload
+ logger.Printf("Request: %s/%s, Payload: %s",
+ r.ClientInfo.ServiceName, r.Operation, r.Params)
+ })
+
+Deprecated "New" function
+
+The New session function has been deprecated because it does not provide a good
+way to return errors that occur when loading the configuration files and values.
+Because of this, NewSession was created so errors can be retrieved when
+creating a session fails.
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's (~/.aws/credentials)
+credentials values, and all other config is provided by the environment variables,
+SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
+option is used to create the Session the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read. The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config
+file (~/.aws/config).
+
+Credentials are the values the SDK should use for authenticating requests with
+AWS Services. Credentials loaded from a configuration file must include both
+aws_access_key_id and aws_secret_access_key provided together in the
+same file to be considered valid. The values will be ignored if not a complete
+group. aws_session_token is an optional field that can be provided if both of
+the other two fields are also provided.
+
+ aws_access_key_id = AKID
+ aws_secret_access_key = SECRET
+ aws_session_token = TOKEN
+
+Assume Role values allow you to configure the SDK to assume an IAM role using
+a set of credentials provided in a config file via the source_profile field.
+Both "role_arn" and "source_profile" are required. The SDK supports assuming
+a role with MFA token if the session option AssumeRoleTokenProvider
+is set.
+
+ role_arn = arn:aws:iam::<account_number>:role/<role_name>
+ source_profile = profile_with_creds
+ external_id = 1234
+ mfa_serial = <serial or mfa arn>
+ role_session_name = session_name
+
+Region is the region the SDK should use for looking up AWS service endpoints
+and signing requests.
+
+ region = us-east-1
+
+Assume Role with MFA token
+
+To create a session with support for assuming an IAM role with MFA set the
+session option AssumeRoleTokenProvider to a function that will prompt for the
+MFA token code when the SDK assumes the role and refreshes the role's credentials.
+This allows you to configure the SDK via the shared config to assume a role
+with MFA tokens.
+
+In order for the SDK to assume a role with MFA the SharedConfigState
+session option must be set to SharedConfigEnable, or AWS_SDK_LOAD_CONFIG
+environment variable set.
+
+The shared configuration instructs the SDK to assume an IAM role with MFA
+when the mfa_serial configuration field is set in the shared config
+(~/.aws/config) or shared credentials (~/.aws/credentials) file.
+
+If mfa_serial is set in the configuration but the AssumeRoleTokenProvider
+session option is not set, an error will be returned when creating the
+session.
+
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+ }))
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess)
+
+To set up assume role outside of a session see the stscreds.AssumeRoleProvider
+documentation.
+
+Environment Variables
+
+When a Session is created several environment variables can be set to adjust
+how the SDK functions, and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values like credentials
+require multiple of the values to be set or the partial values will be ignored.
+All environment variable values are strings unless otherwise noted.
+
+Environment configuration values. If set, both Access Key ID and Secret Access
+Key must be provided. Session Token can optionally also be provided, but is
+not required.
+
+ # Access Key ID
+ AWS_ACCESS_KEY_ID=AKID
+ AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+ # Secret Access Key
+ AWS_SECRET_ACCESS_KEY=SECRET
+ AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+ # Session Token
+ AWS_SESSION_TOKEN=TOKEN
+
+Region value will instruct the SDK where to make service API requests to. If it
+is not provided in the environment the region must be provided before a service
+client request is made.
+
+ AWS_REGION=us-east-1
+
+ # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+ # and AWS_REGION is not also set.
+ AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided "default" will be used as the profile name.
+
+ AWS_PROFILE=my_profile
+
+ # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+ # and AWS_PROFILE is not also set.
+ AWS_DEFAULT_PROFILE=my_profile
+
+SDK load config instructs the SDK to load the shared config in addition to
+shared credentials. This also expands the configuration loaded so the shared
+credentials will have parity with the shared config file. This also enables
+Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+env values as well.
+
+ AWS_SDK_LOAD_CONFIG=1
+
+Shared credentials file path can be set to instruct the SDK to use an alternative
+file for the shared credentials. If not set the file will be loaded from
+$HOME/.aws/credentials on Linux/Unix based systems, and
+%USERPROFILE%\.aws\credentials on Windows.
+
+ AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+
+Shared config file path can be set to instruct the SDK to use an alternative
+file for the shared config. If not set the file will be loaded from
+$HOME/.aws/config on Linux/Unix based systems, and
+%USERPROFILE%\.aws\config on Windows.
+
+ AWS_CONFIG_FILE=$HOME/my_shared_config
+
+Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
+will use instead of the default system's root CA bundle. Use this only
+if you want to replace the CA bundle the SDK uses for TLS requests.
+
+ AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+
+Enabling this option will attempt to merge the Transport into the SDK's HTTP
+client. If the client's Transport is not an http.Transport an error will be
+returned. If the Transport's TLS config is set this option will cause the SDK
+to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
+contains multiple certificates all of them will be loaded.
+
+The Session option CustomCABundle is also available when creating sessions
+to also enable this feature. CustomCABundle session option field has priority
+over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+
+Setting a custom HTTPClient in the aws.Config options will override this setting.
+To use this option with a custom HTTP client, the HTTP client needs to be
+provided when creating the session, not when creating the service client.
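+
+A sketch of supplying a custom CA bundle through the session option (the file
+path below is illustrative):
+
+    f, err := os.Open("/path/to/custom_ca_bundle.pem")
+    if err != nil {
+        // handle error
+    }
+    defer f.Close()
+
+    sess := session.Must(session.NewSessionWithOptions(session.Options{
+        CustomCABundle: f,
+    }))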
+*/
+package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
new file mode 100644
index 00000000..e6278a78
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -0,0 +1,208 @@
+package session
+
+import (
+ "os"
+ "path/filepath"
+ "strconv"
+
+ "github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+// envConfig is a collection of environment values the SDK will read its
+// setup config from. All environment values are optional, but some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type envConfig struct {
+ // Environment configuration values. If set both Access Key ID and Secret Access
+ // Key must be provided. Session Token and optionally also be provided, but is
+ // not required.
+ //
+ // # Access Key ID
+ // AWS_ACCESS_KEY_ID=AKID
+ // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+ //
+ // # Secret Access Key
+ // AWS_SECRET_ACCESS_KEY=SECRET
+ // AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+ //
+ // # Session Token
+ // AWS_SESSION_TOKEN=TOKEN
+ Creds credentials.Value
+
+ // Region value will instruct the SDK where to make service API requests to. If it
+ // is not provided in the environment the region must be provided before a service
+ // client request is made.
+ //
+ // AWS_REGION=us-east-1
+ //
+ // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+ // # and AWS_REGION is not also set.
+ // AWS_DEFAULT_REGION=us-east-1
+ Region string
+
+ // Profile name the SDK should use when loading shared configuration from the
+ // shared configuration files. If not provided "default" will be used as the
+ // profile name.
+ //
+ // AWS_PROFILE=my_profile
+ //
+ // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+ // # and AWS_PROFILE is not also set.
+ // AWS_DEFAULT_PROFILE=my_profile
+ Profile string
+
+ // SDK load config instructs the SDK to load the shared config in addition to
+ // shared credentials. This also expands the configuration loaded from the shared
+ // credentials to have parity with the shared config file. This also enables
+ // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+ // env values as well.
+ //
+ // AWS_SDK_LOAD_CONFIG=1
+ EnableSharedConfig bool
+
+ // Shared credentials file path can be set to instruct the SDK to use an alternate
+ // file for the shared credentials. If not set the file will be loaded from
+ // $HOME/.aws/credentials on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\credentials on Windows.
+ //
+ // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+ SharedCredentialsFile string
+
+ // Shared config file path can be set to instruct the SDK to use an alternate
+ // file for the shared config. If not set the file will be loaded from
+ // $HOME/.aws/config on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\config on Windows.
+ //
+ // AWS_CONFIG_FILE=$HOME/my_shared_config
+ SharedConfigFile string
+
+ // Sets the path to a custom Certificate Authority (CA) bundle PEM file
+ // that the SDK will use instead of the system's root CA bundle.
+ // Only use this if you want to configure the SDK to use a custom set
+ // of CAs.
+ //
+ // Enabling this option will attempt to merge the Transport
+ // into the SDK's HTTP client. If the client's Transport is
+ // not a http.Transport an error will be returned. If the
+ // Transport's TLS config is set this option will cause the
+ // SDK to overwrite the Transport's TLS config's RootCAs value.
+ //
+ // Setting a custom HTTPClient in the aws.Config options will override this setting.
+ // To use this option and custom HTTP client, the HTTP client needs to be provided
+ // when creating the session. Not the service client.
+ //
+ // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+ CustomCABundle string
+}
+
+var (
+ credAccessEnvKey = []string{
+ "AWS_ACCESS_KEY_ID",
+ "AWS_ACCESS_KEY",
+ }
+ credSecretEnvKey = []string{
+ "AWS_SECRET_ACCESS_KEY",
+ "AWS_SECRET_KEY",
+ }
+ credSessionEnvKey = []string{
+ "AWS_SESSION_TOKEN",
+ }
+
+ regionEnvKeys = []string{
+ "AWS_REGION",
+ "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
+ }
+ profileEnvKeys = []string{
+ "AWS_PROFILE",
+ "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
+ }
+)
+
+// loadEnvConfig retrieves the SDK's environment configuration.
+// See `envConfig` for the values that will be retrieved.
+//
+// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
+// the shared SDK config will be loaded in addition to the SDK's specific
+// configuration values.
+func loadEnvConfig() envConfig {
+ enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
+ return envConfigLoad(enableSharedConfig)
+}
+
+// loadEnvSharedConfig retrieves the SDK's environment configuration, and the
+// SDK shared config. See `envConfig` for the values that will be retrieved.
+//
+// Loads the shared configuration in addition to the SDK's specific configuration.
+// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
+// environment variable is set.
+func loadSharedEnvConfig() envConfig {
+ return envConfigLoad(true)
+}
+
+func envConfigLoad(enableSharedConfig bool) envConfig {
+ cfg := envConfig{}
+
+ cfg.EnableSharedConfig = enableSharedConfig
+
+ setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey)
+ setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
+ setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)
+
+ // Require logical grouping of credentials
+ if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
+ cfg.Creds = credentials.Value{}
+ } else {
+ cfg.Creds.ProviderName = "EnvConfigCredentials"
+ }
+
+ regionKeys := regionEnvKeys
+ profileKeys := profileEnvKeys
+ if !cfg.EnableSharedConfig {
+ regionKeys = regionKeys[:1]
+ profileKeys = profileKeys[:1]
+ }
+
+ setFromEnvVal(&cfg.Region, regionKeys)
+ setFromEnvVal(&cfg.Profile, profileKeys)
+
+ cfg.SharedCredentialsFile = sharedCredentialsFilename()
+ cfg.SharedConfigFile = sharedConfigFilename()
+
+ cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
+
+ return cfg
+}
+
+func setFromEnvVal(dst *string, keys []string) {
+ for _, k := range keys {
+ if v := os.Getenv(k); len(v) > 0 {
+ *dst = v
+ break
+ }
+ }
+}
+
+func sharedCredentialsFilename() string {
+ if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 {
+ return name
+ }
+
+ return filepath.Join(userHomeDir(), ".aws", "credentials")
+}
+
+func sharedConfigFilename() string {
+ if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 {
+ return name
+ }
+
+ return filepath.Join(userHomeDir(), ".aws", "config")
+}
+
+func userHomeDir() string {
+ homeDir := os.Getenv("HOME") // *nix
+ if len(homeDir) == 0 { // windows
+ homeDir = os.Getenv("USERPROFILE")
+ }
+
+ return homeDir
+}
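+
+// A sketch of the lookup order implemented by setFromEnvVal; with both
+// region variables set, the first key in the slice wins:
+//
+//	os.Setenv("AWS_REGION", "us-west-2")
+//	os.Setenv("AWS_DEFAULT_REGION", "eu-west-1")
+//	cfg := envConfigLoad(true)
+//	// cfg.Region == "us-west-2", because AWS_REGION precedes
+//	// AWS_DEFAULT_REGION in regionEnvKeys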
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
new file mode 100644
index 00000000..4792d3ae
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -0,0 +1,590 @@
+package session
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+ "github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe to create service clients concurrently, but it is not safe
+// to mutate the Session concurrently.
+//
+// The Session satisfies the service client's client.ClientConfigProvider.
+type Session struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
+// New creates a new instance of the Session, merging in the provided configs
+// on top of the SDK's default configurations. Once the Session is created it
+// can be mutated to modify the Config or Handlers. The Session is safe to be
+// read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value, the New
+// method could now encounter an error when loading the configuration. When
+// the environment variable is set, and an error occurs, New will return a
+// session that will fail all requests reporting the error that occurred while
+// loading the session. Use NewSession to get the error when creating the
+// session.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded, in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file.
+//
+// Deprecated: Use NewSession functions to create sessions instead. NewSession
+// has the same functionality as New except an error can be returned when the
+// func is called instead of waiting to receive an error until a request is made.
+func New(cfgs ...*aws.Config) *Session {
+ // load initial config from environment
+ envCfg := loadEnvConfig()
+
+ if envCfg.EnableSharedConfig {
+ s, err := newSession(Options{}, envCfg, cfgs...)
+ if err != nil {
+ // Old session.New expected all errors to be discovered when
+ // a request is made, and would report the errors then. This
+ // needs to be replicated if an error occurs while creating
+ // the session.
+ msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
+ "Use session.NewSession to handle errors occurring during session creation."
+
+ // Session creation failed, need to report the error and prevent
+ // any requests from succeeding.
+ s = &Session{Config: defaults.Config()}
+ s.Config.MergeIn(cfgs...)
+ s.Config.Logger.Log("ERROR:", msg, "Error:", err)
+ s.Handlers.Validate.PushBack(func(r *request.Request) {
+ r.Error = err
+ })
+ }
+ return s
+ }
+
+ return deprecatedNewSession(cfgs...)
+}
+
+// NewSession returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. Once the Session is created
+// it can be mutated to modify the Config or Handlers. The Session is safe to
+// be read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// See the NewSessionWithOptions func for information on how to override or
+// control through code how the Session will be created. Such as specifying the
+// config profile, and controlling if shared config is enabled or not.
+func NewSession(cfgs ...*aws.Config) (*Session, error) {
+ opts := Options{}
+ opts.Config.MergeIn(cfgs...)
+
+ return NewSessionWithOptions(opts)
+}
+
+// SharedConfigState provides the ability to optionally override the state
+// of the session's creation based on the shared config being enabled or
+// disabled.
+type SharedConfigState int
+
+const (
+ // SharedConfigStateFromEnv does not override any state of the
+ // AWS_SDK_LOAD_CONFIG env var. It is the default value of the
+ // SharedConfigState type.
+ SharedConfigStateFromEnv SharedConfigState = iota
+
+ // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
+ // and disables the shared config functionality.
+ SharedConfigDisable
+
+ // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
+ // and enables the shared config functionality.
+ SharedConfigEnable
+)
+
+// Options provides the means to control how a Session is created and what
+// configuration values will be loaded.
+type Options struct {
+ // Provides config values for the SDK to use when creating service clients
+ // and making API requests to services. Any value set in this field
+ // will override the associated value provided by the SDK defaults,
+ // environment or config files where relevant.
+ //
+ // If not set, configuration values from SDK defaults, environment,
+ // config will be used.
+ Config aws.Config
+
+ // Overrides the config profile the Session should be created from. If not
+ // set the value of the environment variable will be loaded (AWS_PROFILE,
+ // or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
+ //
+ // If not set and environment variables are not set the "default"
+ // (DefaultSharedConfigProfile) will be used as the profile to load the
+ // session config from.
+ Profile string
+
+ // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
+ // environment variable. By default a Session will be created using the
+ // value provided by the AWS_SDK_LOAD_CONFIG environment variable.
+ //
+ // Setting this value to SharedConfigEnable or SharedConfigDisable
+ // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
+ // and enable or disable the shared config functionality.
+ SharedConfigState SharedConfigState
+
+ // When the SDK's shared config is configured to assume a role with MFA
+ // this option is required in order to provide the mechanism that will
+ // retrieve the MFA token. There is no default value for this field. If
+ // it is not set an error will be returned when creating the session.
+ //
+ // This token provider will be called whenever the assumed role's
+ // credentials need to be refreshed. Within the context of service clients
+ // all sharing the same session the SDK will ensure calls to the token
+ // provider are atomic. When sharing a token provider across multiple
+ // sessions additional synchronization logic is needed to ensure the
+ // token providers do not introduce race conditions. It is recommended to
+ // share the session where possible.
+ //
+ // stscreds.StdinTokenProvider is a basic implementation that will prompt
+ // from stdin for the MFA token code.
+ //
+ // This field is only used if the shared configuration is enabled, and
+ // the config enables assume role with MFA via the mfa_serial field.
+ AssumeRoleTokenProvider func() (string, error)
+
+ // Reader for a custom Certificate Authority (CA) bundle in PEM format that
+ // the SDK will use instead of the default system's root CA bundle. Use this
+ // only if you want to replace the CA bundle the SDK uses for TLS requests.
+ //
+ // Enabling this option will attempt to merge the Transport into the SDK's HTTP
+ // client. If the client's Transport is not a http.Transport an error will be
+ // returned. If the Transport's TLS config is set this option will cause the SDK
+ // to overwrite the Transport's TLS config's RootCAs value. If the CA
+ // bundle reader contains multiple certificates all of them will be loaded.
+ //
+ // The AWS_CA_BUNDLE environment variable can also be used to enable this
+ // feature. The CustomCABundle session option field has priority over the
+ // AWS_CA_BUNDLE environment variable, and will be used if both are set.
+ CustomCABundle io.Reader
+}
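+// A minimal, hedged sketch wiring the Options fields above together (the
+// profile name and CA bundle path are hypothetical):
+//
+// f, err := os.Open("/path/to/custom-ca-bundle.pem")
+// if err != nil {
+// // handle the file open error
+// }
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Profile: "dev",
+// SharedConfigState: session.SharedConfigEnable,
+// AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+// CustomCABundle: f,
+// }))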
+
+// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. This func uses the Options
+// values to configure how the Session is created.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config and shared credentials files will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// // Equivalent to session.New
+// sess := session.Must(session.NewSessionWithOptions(session.Options{}))
+//
+// // Specify profile to load for the session's config
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Profile: "profile_name",
+// }))
+//
+// // Specify profile for config and region for requests
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Config: aws.Config{Region: aws.String("us-east-1")},
+// Profile: "profile_name",
+// }))
+//
+// // Force enable Shared Config support
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// SharedConfigState: session.SharedConfigEnable,
+// }))
+func NewSessionWithOptions(opts Options) (*Session, error) {
+ var envCfg envConfig
+ if opts.SharedConfigState == SharedConfigEnable {
+ envCfg = loadSharedEnvConfig()
+ } else {
+ envCfg = loadEnvConfig()
+ }
+
+ if len(opts.Profile) > 0 {
+ envCfg.Profile = opts.Profile
+ }
+
+ switch opts.SharedConfigState {
+ case SharedConfigDisable:
+ envCfg.EnableSharedConfig = false
+ case SharedConfigEnable:
+ envCfg.EnableSharedConfig = true
+ }
+
+ // Only use AWS_CA_BUNDLE if session option is not provided.
+ if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
+ f, err := os.Open(envCfg.CustomCABundle)
+ if err != nil {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to open custom CA bundle PEM file", err)
+ }
+ defer f.Close()
+ opts.CustomCABundle = f
+ }
+
+ return newSession(opts, envCfg, &opts.Config)
+}
+
+// Must is a helper function to ensure the Session is valid and there was no
+// error when calling a NewSession function.
+//
+// This helper is intended to be used in variable initialization to load the
+// Session and configuration at startup. Such as:
+//
+// var sess = session.Must(session.NewSession())
+func Must(sess *Session, err error) *Session {
+ if err != nil {
+ panic(err)
+ }
+
+ return sess
+}
+
+func deprecatedNewSession(cfgs ...*aws.Config) *Session {
+ cfg := defaults.Config()
+ handlers := defaults.Handlers()
+
+ // Apply the passed in configs so the configuration can be applied to the
+ // default credential chain
+ cfg.MergeIn(cfgs...)
+ if cfg.EndpointResolver == nil {
+ // An endpoint resolver is required for a session to be able to provide
+ // endpoints for service client configurations.
+ cfg.EndpointResolver = endpoints.DefaultResolver()
+ }
+ cfg.Credentials = defaults.CredChain(cfg, handlers)
+
+ // Reapply any passed in configs to override credentials if set
+ cfg.MergeIn(cfgs...)
+
+ s := &Session{
+ Config: cfg,
+ Handlers: handlers,
+ }
+
+ initHandlers(s)
+
+ return s
+}
+
+func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
+ cfg := defaults.Config()
+ handlers := defaults.Handlers()
+
+ // Get a merged version of the user provided config to determine if
+ // credentials were set.
+ userCfg := &aws.Config{}
+ userCfg.MergeIn(cfgs...)
+
+ // Config files will be loaded in order, with later files overwriting
+ // values set by previous config files.
+ cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
+ if !envCfg.EnableSharedConfig {
+ // The shared config file (~/.aws/config) is only loaded if instructed
+ // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
+ cfgFiles = cfgFiles[1:]
+ }
+
+ // Load additional config from file(s)
+ sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
+ return nil, err
+ }
+
+ s := &Session{
+ Config: cfg,
+ Handlers: handlers,
+ }
+
+ initHandlers(s)
+
+ // Setup HTTP client with custom cert bundle if enabled
+ if opts.CustomCABundle != nil {
+ if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
+ return nil, err
+ }
+ }
+
+ return s, nil
+}
+
+func loadCustomCABundle(s *Session, bundle io.Reader) error {
+ var t *http.Transport
+ switch v := s.Config.HTTPClient.Transport.(type) {
+ case *http.Transport:
+ t = v
+ default:
+ if s.Config.HTTPClient.Transport != nil {
+ return awserr.New("LoadCustomCABundleError",
+ "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
+ }
+ }
+ if t == nil {
+ t = &http.Transport{}
+ }
+
+ p, err := loadCertPool(bundle)
+ if err != nil {
+ return err
+ }
+ if t.TLSClientConfig == nil {
+ t.TLSClientConfig = &tls.Config{}
+ }
+ t.TLSClientConfig.RootCAs = p
+
+ s.Config.HTTPClient.Transport = t
+
+ return nil
+}
+
+func loadCertPool(r io.Reader) (*x509.CertPool, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to read custom CA bundle PEM file", err)
+ }
+
+ p := x509.NewCertPool()
+ if !p.AppendCertsFromPEM(b) {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to load custom CA bundle PEM file", err)
+ }
+
+ return p, nil
+}
+
+func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error {
+ // Merge in user provided configuration
+ cfg.MergeIn(userCfg)
+
+ // Region if not already set by user
+ if len(aws.StringValue(cfg.Region)) == 0 {
+ if len(envCfg.Region) > 0 {
+ cfg.WithRegion(envCfg.Region)
+ } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
+ cfg.WithRegion(sharedCfg.Region)
+ }
+ }
+
+ // Configure credentials if not already set
+ if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
+ if len(envCfg.Creds.AccessKeyID) > 0 {
+ cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+ envCfg.Creds,
+ )
+ } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
+ cfgCp := *cfg
+ cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
+ sharedCfg.AssumeRoleSource.Creds,
+ )
+ if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
+ // AssumeRole Token provider is required if doing Assume Role
+ // with MFA.
+ return AssumeRoleTokenProviderNotSetError{}
+ }
+ cfg.Credentials = stscreds.NewCredentials(
+ &Session{
+ Config: &cfgCp,
+ Handlers: handlers.Copy(),
+ },
+ sharedCfg.AssumeRole.RoleARN,
+ func(opt *stscreds.AssumeRoleProvider) {
+ opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
+
+ // Assume role with external ID
+ if len(sharedCfg.AssumeRole.ExternalID) > 0 {
+ opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
+ }
+
+ // Assume role with MFA
+ if len(sharedCfg.AssumeRole.MFASerial) > 0 {
+ opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
+ opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
+ }
+ },
+ )
+ } else if len(sharedCfg.Creds.AccessKeyID) > 0 {
+ cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+ sharedCfg.Creds,
+ )
+ } else {
+ // Fallback to default credentials provider, include mock errors
+ // for the credential chain so user can identify why credentials
+ // failed to be retrieved.
+ cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
+ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+ Providers: []credentials.Provider{
+ &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
+ &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
+ defaults.RemoteCredProvider(*cfg, handlers),
+ },
+ })
+ }
+ }
+
+ return nil
+}
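+// Note the credential precedence implemented above: user-provided config
+// first, then environment credentials, then a shared-config assume role,
+// then shared credentials, and finally the default remote credential
+// provider chain, with placeholder errors for the environment and shared
+// sources so failures can be diagnosed.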
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session where the AssumeRoleTokenProvider option is not set, but the shared
+// config is configured to assume a role with an MFA token.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Code is the short id of the error.
+func (e AssumeRoleTokenProviderNotSetError) Code() string {
+ return "AssumeRoleTokenProviderNotSetError"
+}
+
+// Message is the description of the error
+func (e AssumeRoleTokenProviderNotSetError) Message() string {
+ return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set."
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
+ return nil
+}
+
+// Error satisfies the error interface.
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+type credProviderError struct {
+ Err error
+}
+
+var emptyCreds = credentials.Value{}
+
+func (c credProviderError) Retrieve() (credentials.Value, error) {
+ return credentials.Value{}, c.Err
+}
+func (c credProviderError) IsExpired() bool {
+ return true
+}
+
+func initHandlers(s *Session) {
+ // Add the Validate parameter handler if it is not disabled.
+ s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
+ if !aws.BoolValue(s.Config.DisableParamValidation) {
+ s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
+ }
+}
+
+// Copy creates and returns a copy of the current Session, copying the config
+// and handlers. If any additional configs are provided they will be merged
+// on top of the Session's copied config.
+//
+// // Create a copy of the current Session, configured for the us-west-2 region.
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+func (s *Session) Copy(cfgs ...*aws.Config) *Session {
+ newSession := &Session{
+ Config: s.Config.Copy(cfgs...),
+ Handlers: s.Handlers.Copy(),
+ }
+
+ initHandlers(newSession)
+
+ return newSession
+}
+
+// ClientConfig satisfies the client.ConfigProvider interface and is used to
+// configure the service client instances. Passing the Session to the service
+// client's constructor (New) will use this method to configure the client.
+func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
+ // Backwards compatibility: the error will be discarded if the user calls
+ // ClientConfig directly. All SDK services will use clientConfigWithErr.
+ cfg, _ := s.clientConfigWithErr(serviceName, cfgs...)
+
+ return cfg
+}
+
+func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) {
+ s = s.Copy(cfgs...)
+
+ var resolved endpoints.ResolvedEndpoint
+ var err error
+
+ region := aws.StringValue(s.Config.Region)
+
+ if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 {
+ resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL))
+ resolved.SigningRegion = region
+ } else {
+ resolved, err = s.Config.EndpointResolver.EndpointFor(
+ serviceName, region,
+ func(opt *endpoints.Options) {
+ opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL)
+ opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack)
+
+ // Support the condition where the service is modeled but its
+ // endpoint metadata is not available.
+ opt.ResolveUnknownService = true
+ },
+ )
+ }
+
+ return client.Config{
+ Config: s.Config,
+ Handlers: s.Handlers,
+ Endpoint: resolved.URL,
+ SigningRegion: resolved.SigningRegion,
+ SigningName: resolved.SigningName,
+ }, err
+}
+
+// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
+// that the EndpointResolver will not be used to resolve the endpoint. The only
+// endpoint set must come from the aws.Config.Endpoint field.
+func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config {
+ s = s.Copy(cfgs...)
+
+ var resolved endpoints.ResolvedEndpoint
+
+ region := aws.StringValue(s.Config.Region)
+
+ if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
+ resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
+ resolved.SigningRegion = region
+ }
+
+ return client.Config{
+ Config: s.Config,
+ Handlers: s.Handlers,
+ Endpoint: resolved.URL,
+ SigningRegion: resolved.SigningRegion,
+ SigningName: resolved.SigningName,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
new file mode 100644
index 00000000..b58076f5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -0,0 +1,295 @@
+package session
+
+import (
+ "fmt"
+ "io/ioutil"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/go-ini/ini"
+)
+
+const (
+ // Static Credentials group
+ accessKeyIDKey = `aws_access_key_id` // group required
+ secretAccessKey = `aws_secret_access_key` // group required
+ sessionTokenKey = `aws_session_token` // optional
+
+ // Assume Role Credentials group
+ roleArnKey = `role_arn` // group required
+ sourceProfileKey = `source_profile` // group required
+ externalIDKey = `external_id` // optional
+ mfaSerialKey = `mfa_serial` // optional
+ roleSessionNameKey = `role_session_name` // optional
+
+ // Additional Config fields
+ regionKey = `region`
+
+ // DefaultSharedConfigProfile is the default profile to be used when
+ // loading configuration from the config files if another profile name
+ // is not provided.
+ DefaultSharedConfigProfile = `default`
+)
+
+type assumeRoleConfig struct {
+ RoleARN string
+ SourceProfile string
+ ExternalID string
+ MFASerial string
+ RoleSessionName string
+}
+
+// sharedConfig represents the configuration fields of the SDK config files.
+type sharedConfig struct {
+ // Credentials values from the config file. Both aws_access_key_id
+ // and aws_secret_access_key must be provided together in the same file
+ // to be considered valid. The values will be ignored if not a complete group.
+ // aws_session_token is an optional field that can be provided if both of the
+ // other two fields are also provided.
+ //
+ // aws_access_key_id
+ // aws_secret_access_key
+ // aws_session_token
+ Creds credentials.Value
+
+ AssumeRole assumeRoleConfig
+ AssumeRoleSource *sharedConfig
+
+ // Region is the region the SDK should use for looking up AWS service endpoints
+ // and signing requests.
+ //
+ // region
+ Region string
+}
+
+type sharedConfigFile struct {
+ Filename string
+ IniData *ini.File
+}
+
+// loadSharedConfig retrieves the configuration from the list of files
+// using the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B, both defining credentials: if the
+// order of the files is A then B, B's credential values will be used instead of A's.
+//
+// See sharedConfig.setFromIniFile for information on how the config files
+// will be loaded.
+func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
+ if len(profile) == 0 {
+ profile = DefaultSharedConfigProfile
+ }
+
+ files, err := loadSharedConfigIniFiles(filenames)
+ if err != nil {
+ return sharedConfig{}, err
+ }
+
+ cfg := sharedConfig{}
+ if err = cfg.setFromIniFiles(profile, files); err != nil {
+ return sharedConfig{}, err
+ }
+
+ if len(cfg.AssumeRole.SourceProfile) > 0 {
+ if err := cfg.setAssumeRoleSource(profile, files); err != nil {
+ return sharedConfig{}, err
+ }
+ }
+
+ return cfg, nil
+}
+
+func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
+ files := make([]sharedConfigFile, 0, len(filenames))
+
+ for _, filename := range filenames {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ // Skip files which can't be opened and read for whatever reason
+ continue
+ }
+
+ f, err := ini.Load(b)
+ if err != nil {
+ return nil, SharedConfigLoadError{Filename: filename}
+ }
+
+ files = append(files, sharedConfigFile{
+ Filename: filename, IniData: f,
+ })
+ }
+
+ return files, nil
+}
+
+func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
+ var assumeRoleSrc sharedConfig
+
+ // Multiple level assume role chains are not supported
+ if cfg.AssumeRole.SourceProfile == origProfile {
+ assumeRoleSrc = *cfg
+ assumeRoleSrc.AssumeRole = assumeRoleConfig{}
+ } else {
+ err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
+ if err != nil {
+ return err
+ }
+ }
+
+ if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
+ return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
+ }
+
+ cfg.AssumeRoleSource = &assumeRoleSrc
+
+ return nil
+}
+
+func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
+ // Apply the profile's config from each file, ignoring files where the
+ // profile does not exist.
+ for _, f := range files {
+ if err := cfg.setFromIniFile(profile, f); err != nil {
+ if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+ // Ignore profiles missing from a file
+ continue
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+// setFromIniFile loads the configuration from the file using
+// the profile provided. A sharedConfig pointer type value is used so that
+// multiple config file loadings can be chained.
+//
+// Only complete, logically grouped values are loaded; fields in cfg will not
+// be set for incomplete groups, such as credentials. For example,
+// if a config file only includes aws_access_key_id but no aws_secret_access_key
+// the aws_access_key_id will be ignored.
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
+ section, err := file.IniData.GetSection(profile)
+ if err != nil {
+ // Fall back to the alternate profile name: profile <name>
+ section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
+ if err != nil {
+ return SharedConfigProfileNotExistsError{Profile: profile, Err: err}
+ }
+ }
+
+ // Shared Credentials
+ akid := section.Key(accessKeyIDKey).String()
+ secret := section.Key(secretAccessKey).String()
+ if len(akid) > 0 && len(secret) > 0 {
+ cfg.Creds = credentials.Value{
+ AccessKeyID: akid,
+ SecretAccessKey: secret,
+ SessionToken: section.Key(sessionTokenKey).String(),
+ ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
+ }
+ }
+
+ // Assume Role
+ roleArn := section.Key(roleArnKey).String()
+ srcProfile := section.Key(sourceProfileKey).String()
+ if len(roleArn) > 0 && len(srcProfile) > 0 {
+ cfg.AssumeRole = assumeRoleConfig{
+ RoleARN: roleArn,
+ SourceProfile: srcProfile,
+ ExternalID: section.Key(externalIDKey).String(),
+ MFASerial: section.Key(mfaSerialKey).String(),
+ RoleSessionName: section.Key(roleSessionNameKey).String(),
+ }
+ }
+
+ // Region
+ if v := section.Key(regionKey).String(); len(v) > 0 {
+ cfg.Region = v
+ }
+
+ return nil
+}
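+// For reference, a hedged sketch of a profile exercising the groups parsed
+// above (all values are hypothetical):
+//
+// [profile assumed]
+// role_arn = arn:aws:iam::123456789012:role/example-role
+// source_profile = default
+// mfa_serial = arn:aws:iam::123456789012:mfa/example-user
+// region = us-west-2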
+
+// SharedConfigLoadError is an error returned when the shared config file fails to load.
+type SharedConfigLoadError struct {
+ Filename string
+ Err error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigLoadError) Code() string {
+ return "SharedConfigLoadError"
+}
+
+// Message is the description of the error
+func (e SharedConfigLoadError) Message() string {
+ return fmt.Sprintf("failed to load config file, %s", e.Filename)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigLoadError) OrigErr() error {
+ return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigLoadError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigProfileNotExistsError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistsError struct {
+ Profile string
+ Err error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigProfileNotExistsError) Code() string {
+ return "SharedConfigProfileNotExistsError"
+}
+
+// Message is the description of the error
+func (e SharedConfigProfileNotExistsError) Message() string {
+ return fmt.Sprintf("failed to get profile, %s", e.Profile)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistsError) OrigErr() error {
+ return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigProfileNotExistsError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+ RoleARN string
+}
+
+// Code is the short id of the error.
+func (e SharedConfigAssumeRoleError) Code() string {
+ return "SharedConfigAssumeRoleError"
+}
+
+// Message is the description of the error
+func (e SharedConfigAssumeRoleError) Message() string {
+ return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
+ e.RoleARN)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) OrigErr() error {
+ return nil
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigAssumeRoleError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
new file mode 100644
index 00000000..244c86da
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+ "net/http"
+ "strings"
+)
+
+// rules houses a set of rules needed for validation of a
+// string value
+type rules []rule
+
+// rule interface allows for more flexible rules and simply
+// checks whether or not a value adheres to that rule
+type rule interface {
+ IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and return whether any rule
+// applies to the value; nested rules are supported
+func (r rules) IsValid(value string) bool {
+ for _, rule := range r {
+ if rule.IsValid(value) {
+ return true
+ }
+ }
+ return false
+}
+
+// mapRule generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule reports whether the value exists in the map
+func (m mapRule) IsValid(value string) bool {
+ _, ok := m[value]
+ return ok
+}
+
+// whitelist is a generic rule for whitelisting
+type whitelist struct {
+ rule
+}
+
+// IsValid for whitelist checks if the value is within the whitelist
+func (w whitelist) IsValid(value string) bool {
+ return w.rule.IsValid(value)
+}
+
+// blacklist is a generic rule for blacklisting
+type blacklist struct {
+ rule
+}
+
+// IsValid for blacklist checks if the value is not within the blacklist
+func (b blacklist) IsValid(value string) bool {
+ return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns whether a match has
+// been found
+func (p patterns) IsValid(value string) bool {
+ for _, pattern := range p {
+ if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
+ return true
+ }
+ }
+ return false
+}
+
+// inclusiveRules rules allow for rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+ for _, rule := range r {
+ if !rule.IsValid(value) {
+ return false
+ }
+ }
+ return true
+}
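+// For example, allowedQueryHoisting in v4.go composes these rule types: a
+// header may be hoisted to a presigned request's query string only if it is
+// not a required signed header (blacklist) and it carries the "X-Amz-" prefix
+// (patterns).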
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
new file mode 100644
index 00000000..6aa2ed24
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
@@ -0,0 +1,7 @@
+package v4
+
+// WithUnsignedPayload will enable and set the UnsignedPayload field of
+// the signer to true.
+func WithUnsignedPayload(v4 *Signer) {
+ v4.UnsignedPayload = true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
new file mode 100644
index 00000000..bd082e9d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
@@ -0,0 +1,24 @@
+// +build go1.5
+
+package v4
+
+import (
+ "net/url"
+ "strings"
+)
+
+func getURIPath(u *url.URL) string {
+ var uri string
+
+ if len(u.Opaque) > 0 {
+ uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+ } else {
+ uri = u.EscapedPath()
+ }
+
+ if len(uri) == 0 {
+ uri = "/"
+ }
+
+ return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
new file mode 100644
index 00000000..434ac872
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -0,0 +1,761 @@
+// Package v4 implements signing for AWS V4 signer
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use the URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+// "//<hostname>/<path>"
+//
+// // e.g.
+// "//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
+// method and use the returned value. If you're using Go v1.4 you must set
+// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
+// Go v1.5 the signer will fall back to URL.Path.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query the signature was generated for.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, it is recommended that when using the signer outside of the
+// SDK you explicitly escape the request prior to signing. This will help
+// prevent signature validation errors, and can be done by setting
+// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// If signing a request intended for an HTTP2 server, and you're using Go 1.6.2
+// through 1.7.4, you should use URL.RawPath as the pre-escaped form of the
+// request URL. https://github.com/golang/go/issues/16847 points to a bug in
+// Go pre-1.8 that fails to make HTTP2 requests using an absolute URL in the
+// HTTP message. URL.Opaque generally will force Go to make requests with
+// absolute URL. URL.RawPath does not do this, but RawPath must be a valid
+// escaping of Path or url.EscapedPath will ignore the RawPath escaping.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
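+//
+// As an additional hedged sketch (the service name, region, and endpoint are
+// illustrative, not a real service):
+//
+// signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
+// req, _ := http.NewRequest("GET", "https://service.us-east-1.amazonaws.com/", nil)
+// _, err := signer.Sign(req, nil, "service", "us-east-1", time.Now())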
+package v4
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+ authHeaderPrefix = "AWS4-HMAC-SHA256"
+ timeFormat = "20060102T150405Z"
+ shortTimeFormat = "20060102"
+
+ // emptyStringSHA256 is a SHA256 of an empty string
+ emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+)
+
+var ignoredHeaders = rules{
+ blacklist{
+ mapRule{
+ "Authorization": struct{}{},
+ "User-Agent": struct{}{},
+ "X-Amzn-Trace-Id": struct{}{},
+ },
+ },
+}
+
+// requiredSignedHeaders is a whitelist for building canonical headers.
+var requiredSignedHeaders = rules{
+ whitelist{
+ mapRule{
+ "Cache-Control": struct{}{},
+ "Content-Disposition": struct{}{},
+ "Content-Encoding": struct{}{},
+ "Content-Language": struct{}{},
+ "Content-Md5": struct{}{},
+ "Content-Type": struct{}{},
+ "Expires": struct{}{},
+ "If-Match": struct{}{},
+ "If-Modified-Since": struct{}{},
+ "If-None-Match": struct{}{},
+ "If-Unmodified-Since": struct{}{},
+ "Range": struct{}{},
+ "X-Amz-Acl": struct{}{},
+ "X-Amz-Copy-Source": struct{}{},
+ "X-Amz-Copy-Source-If-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
+ "X-Amz-Copy-Source-If-None-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+ "X-Amz-Copy-Source-Range": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Grant-Full-control": struct{}{},
+ "X-Amz-Grant-Read": struct{}{},
+ "X-Amz-Grant-Read-Acp": struct{}{},
+ "X-Amz-Grant-Write": struct{}{},
+ "X-Amz-Grant-Write-Acp": struct{}{},
+ "X-Amz-Metadata-Directive": struct{}{},
+ "X-Amz-Mfa": struct{}{},
+ "X-Amz-Request-Payer": struct{}{},
+ "X-Amz-Server-Side-Encryption": struct{}{},
+ "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Storage-Class": struct{}{},
+ "X-Amz-Website-Redirect-Location": struct{}{},
+ },
+ },
+ patterns{"X-Amz-Meta-"},
+}
+
+// allowedQueryHoisting is a whitelist of headers that may be hoisted to a
+// presigned request's query string.
+var allowedQueryHoisting = inclusiveRules{
+ blacklist{requiredSignedHeaders},
+ patterns{"X-Amz-"},
+}
+
+// Signer applies AWS v4 signing to a given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.
+type Signer struct {
+ // The authentication credentials the request will be signed against.
+ // This value must be set to sign requests.
+ Credentials *credentials.Credentials
+
+ // Sets the log level the signer should use when reporting information to
+ // the logger. If the logger is nil nothing will be logged. See
+ // aws.LogLevelType for more information on available logging levels
+ //
+ // By default nothing will be logged.
+ Debug aws.LogLevelType
+
+ // The logger that logging information will be written to. If the logger
+ // is nil, nothing will be logged.
+ Logger aws.Logger
+
+ // Disables the Signer's moving HTTP header key/value pairs from the HTTP
+ // request header to the request's query string. This is most commonly used
+ // with pre-signed requests preventing headers from being added to the
+ // request's query string.
+ DisableHeaderHoisting bool
+
+ // Disables the automatic escaping of the URI path of the request for the
+ // signature's canonical string's path. For services that do not need
+ // additional escaping, use this to disable the signer escaping the path.
+ //
+ // S3 is an example of a service that does not need additional escaping.
+ //
+ // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+ DisableURIPathEscaping bool
+
+ // Disables the automatic setting of the HTTP request's Body field with the
+ // io.ReadSeeker passed in to the signer. This is useful if you're using a
+ // custom wrapper around the body for the io.ReadSeeker and want to preserve
+ // the Body value on the Request.Body.
+ //
+ // This does run the risk of signing a request with a body that will not be
+ // sent in the request. You need to ensure that the underlying data of the
+ // Body values are the same.
+ DisableRequestBodyOverwrite bool
+
+ // currentTimeFn returns the time value which represents the current time.
+ // This value should only be used for testing. If it is nil the default
+ // time.Now will be used.
+ currentTimeFn func() time.Time
+
+ // UnsignedPayload will prevent signing of the payload. This will only
+ // work for services that have support for this.
+ UnsignedPayload bool
+}
+
+// NewSigner returns a Signer pointer configured with the credentials and optional
+// option values provided. If no options are provided the Signer will use its
+// default configuration.
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+ v4 := &Signer{
+ Credentials: credentials,
+ }
+
+ for _, option := range options {
+ option(v4)
+ }
+
+ return v4
+}
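+// A brief, hedged usage sketch (the credential values are placeholders):
+//
+// creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
+// signer := v4.NewSigner(creds, v4.WithUnsignedPayload)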
+
+type signingCtx struct {
+ ServiceName string
+ Region string
+ Request *http.Request
+ Body io.ReadSeeker
+ Query url.Values
+ Time time.Time
+ ExpireTime time.Duration
+ SignedHeaderVals http.Header
+
+ DisableURIPathEscaping bool
+
+ credValues credentials.Value
+ isPresign bool
+ formattedTime string
+ formattedShortTime string
+ unsignedPayload bool
+
+ bodyDigest string
+ signedHeaders string
+ canonicalHeaders string
+ canonicalString string
+ credentialString string
+ stringToSign string
+ signature string
+ authorization string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign will set the request's Body to be the `body` parameter passed in. If
+// the body is not already an io.ReadCloser, it will be wrapped within one. If
+// a `nil` body parameter is passed to Sign, the request's Body field will
+// also be set to nil. It's important to note that this functionality will not
+// change the request's ContentLength.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way the header values on the request
+// will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+ return v4.signWithBody(r, body, service, region, 0, signTime)
+}
+
+// Presign signs AWS v4 requests with the provided body, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful to know what header values need to be shared with the party the
+// presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// instead of header values. This allows you to share the Presigned Request's
+// URL with third parties, or distribute it throughout your system with minimal
+// dependencies.
+//
+// Presign also takes an exp value which is the duration the
+// signed request will be valid for after the signing time. This allows you to
+// set when the request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning an S3 request will not compute the body's SHA256 hash by default.
+// This is because the general use case for S3 presigned URLs is to share
+// PUT/GET capabilities. If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+ return v4.signWithBody(r, body, service, region, exp, signTime)
+}
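+// A hedged example of presigning (the bucket, key, and expiry are
+// illustrative):
+//
+// req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/key", nil)
+// _, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now())
+// // req.URL.RawQuery now carries the X-Amz-* values, including X-Amz-Signature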
+
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+ currentTimeFn := v4.currentTimeFn
+ if currentTimeFn == nil {
+ currentTimeFn = time.Now
+ }
+
+ ctx := &signingCtx{
+ Request: r,
+ Body: body,
+ Query: r.URL.Query(),
+ Time: signTime,
+ ExpireTime: exp,
+ isPresign: exp != 0,
+ ServiceName: service,
+ Region: region,
+ DisableURIPathEscaping: v4.DisableURIPathEscaping,
+ unsignedPayload: v4.UnsignedPayload,
+ }
+
+ for key := range ctx.Query {
+ sort.Strings(ctx.Query[key])
+ }
+
+ if ctx.isRequestSigned() {
+ ctx.Time = currentTimeFn()
+ ctx.handlePresignRemoval()
+ }
+
+ var err error
+ ctx.credValues, err = v4.Credentials.Get()
+ if err != nil {
+ return http.Header{}, err
+ }
+
+ ctx.assignAmzQueryValues()
+ ctx.build(v4.DisableHeaderHoisting)
+
+ // If the request is not presigned the body should be attached to it,
+ // preventing the confusing case of attempting to send a signed request
+ // without the body it was signed for attached.
+ if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
+ var reader io.ReadCloser
+ if body != nil {
+ var ok bool
+ if reader, ok = body.(io.ReadCloser); !ok {
+ reader = ioutil.NopCloser(body)
+ }
+ }
+ r.Body = reader
+ }
+
+ if v4.Debug.Matches(aws.LogDebugWithSigning) {
+ v4.logSigningInfo(ctx)
+ }
+
+ return ctx.SignedHeaderVals, nil
+}
+
+func (ctx *signingCtx) handlePresignRemoval() {
+ if !ctx.isPresign {
+ return
+ }
+
+ // The request was previously signed or presigned. The current signature
+ // is invalid, and must be removed and regenerated or the request will fail.
+ ctx.removePresign()
+
+ // Update the request's query string to ensure the values stay in
+ // sync in the case retrieving the new credentials fails.
+ ctx.Request.URL.RawQuery = ctx.Query.Encode()
+}
+
+func (ctx *signingCtx) assignAmzQueryValues() {
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+ if ctx.credValues.SessionToken != "" {
+ ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+ } else {
+ ctx.Query.Del("X-Amz-Security-Token")
+ }
+
+ return
+ }
+
+ if ctx.credValues.SessionToken != "" {
+ ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+ }
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests with the V4 signature.
+var SignRequestHandler = request.NamedHandler{
+ Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs an AWS request with the V4 signature. This
+// request handler is best used only with the SDK's built-in service clients'
+// API operation requests.
+//
+// This function should not be used on its own, but in conjunction with
+// an AWS service client's API operation call. To sign a standalone request
+// not created by a service client's API operation method use the "Sign" or
+// "Presign" functions of the "Signer" type.
+//
+// If the credentials of the request's config are set to
+// credentials.AnonymousCredentials the request will not be signed.
+func SignSDKRequest(req *request.Request) {
+ signSDKRequestWithCurrTime(req, time.Now)
+}
+
+// BuildNamedHandler will build a generic handler for signing.
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
+ return request.NamedHandler{
+ Name: name,
+ Fn: func(req *request.Request) {
+ signSDKRequestWithCurrTime(req, time.Now, opts...)
+ },
+ }
+}
+
+func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
+ // Skip signing the request when the AnonymousCredentials object is
+ // used; such requests do not need to be signed.
+ if req.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+
+ region := req.ClientInfo.SigningRegion
+ if region == "" {
+ region = aws.StringValue(req.Config.Region)
+ }
+
+ name := req.ClientInfo.SigningName
+ if name == "" {
+ name = req.ClientInfo.ServiceName
+ }
+
+ v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
+ v4.Debug = req.Config.LogLevel.Value()
+ v4.Logger = req.Config.Logger
+ v4.DisableHeaderHoisting = req.NotHoist
+ v4.currentTimeFn = curTimeFn
+ if name == "s3" {
+ // S3 service should not have any escaping applied
+ v4.DisableURIPathEscaping = true
+ }
+ // Prevents setting the HTTPRequest's Body, since the Body could be
+ // wrapped in a custom io.Closer that we do not want the signer to
+ // stomp on.
+ v4.DisableRequestBodyOverwrite = true
+ })
+
+ for _, opt := range opts {
+ opt(v4)
+ }
+
+ signingTime := req.Time
+ if !req.LastSignedAt.IsZero() {
+ signingTime = req.LastSignedAt
+ }
+
+ signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
+ name, region, req.ExpireTime, signingTime,
+ )
+ if err != nil {
+ req.Error = err
+ req.SignedHeaderVals = nil
+ return
+ }
+
+ req.SignedHeaderVals = signedHeaders
+ req.LastSignedAt = curTimeFn()
+}
+
+const logSignInfoMsg = `DEBUG: Request Signature:
+---[ CANONICAL STRING ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
+
+func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
+ signedURLMsg := ""
+ if ctx.isPresign {
+ signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
+ }
+ msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
+ v4.Logger.Log(msg)
+}
+
+func (ctx *signingCtx) build(disableHeaderHoisting bool) {
+ ctx.buildTime() // no depends
+ ctx.buildCredentialString() // no depends
+
+ unsignedHeaders := ctx.Request.Header
+ if ctx.isPresign {
+ if !disableHeaderHoisting {
+ urlValues := url.Values{}
+ urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
+ for k := range urlValues {
+ ctx.Query[k] = urlValues[k]
+ }
+ }
+ }
+
+ ctx.buildBodyDigest()
+ ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
+ ctx.buildCanonicalString() // depends on canon headers / signed headers
+ ctx.buildStringToSign() // depends on canon string
+ ctx.buildSignature() // depends on string to sign
+
+ if ctx.isPresign {
+ ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
+ } else {
+ parts := []string{
+ authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
+ "SignedHeaders=" + ctx.signedHeaders,
+ "Signature=" + ctx.signature,
+ }
+ ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
+ }
+}
+
+func (ctx *signingCtx) buildTime() {
+ ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
+ ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
+
+ if ctx.isPresign {
+ duration := int64(ctx.ExpireTime / time.Second)
+ ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
+ ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
+ } else {
+ ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
+ }
+}
+
+func (ctx *signingCtx) buildCredentialString() {
+ ctx.credentialString = strings.Join([]string{
+ ctx.formattedShortTime,
+ ctx.Region,
+ ctx.ServiceName,
+ "aws4_request",
+ }, "/")
+
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
+ }
+}
+
+func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
+ query := url.Values{}
+ unsignedHeaders := http.Header{}
+ for k, h := range header {
+ if r.IsValid(k) {
+ query[k] = h
+ } else {
+ unsignedHeaders[k] = h
+ }
+ }
+
+ return query, unsignedHeaders
+}
+func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
+ var headers []string
+ headers = append(headers, "host")
+ for k, v := range header {
+ canonicalKey := http.CanonicalHeaderKey(k)
+ if !r.IsValid(canonicalKey) {
+ continue // ignored header
+ }
+ if ctx.SignedHeaderVals == nil {
+ ctx.SignedHeaderVals = make(http.Header)
+ }
+
+ lowerCaseKey := strings.ToLower(k)
+ if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
+ // include additional values
+ ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
+ continue
+ }
+
+ headers = append(headers, lowerCaseKey)
+ ctx.SignedHeaderVals[lowerCaseKey] = v
+ }
+ sort.Strings(headers)
+
+ ctx.signedHeaders = strings.Join(headers, ";")
+
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
+ }
+
+ headerValues := make([]string, len(headers))
+ for i, k := range headers {
+ if k == "host" {
+ headerValues[i] = "host:" + ctx.Request.URL.Host
+ } else {
+ headerValues[i] = k + ":" +
+ strings.Join(ctx.SignedHeaderVals[k], ",")
+ }
+ }
+
+ ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
+}
+
+func (ctx *signingCtx) buildCanonicalString() {
+ ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
+
+ uri := getURIPath(ctx.Request.URL)
+
+ if !ctx.DisableURIPathEscaping {
+ uri = rest.EscapePath(uri, false)
+ }
+
+ ctx.canonicalString = strings.Join([]string{
+ ctx.Request.Method,
+ uri,
+ ctx.Request.URL.RawQuery,
+ ctx.canonicalHeaders + "\n",
+ ctx.signedHeaders,
+ ctx.bodyDigest,
+ }, "\n")
+}
+
+func (ctx *signingCtx) buildStringToSign() {
+ ctx.stringToSign = strings.Join([]string{
+ authHeaderPrefix,
+ ctx.formattedTime,
+ ctx.credentialString,
+ hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
+ }, "\n")
+}
+
+func (ctx *signingCtx) buildSignature() {
+ secret := ctx.credValues.SecretAccessKey
+ date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
+ region := makeHmac(date, []byte(ctx.Region))
+ service := makeHmac(region, []byte(ctx.ServiceName))
+ credentials := makeHmac(service, []byte("aws4_request"))
+ signature := makeHmac(credentials, []byte(ctx.stringToSign))
+ ctx.signature = hex.EncodeToString(signature)
+}
+
+func (ctx *signingCtx) buildBodyDigest() {
+ hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
+ if hash == "" {
+ if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") {
+ hash = "UNSIGNED-PAYLOAD"
+ } else if ctx.Body == nil {
+ hash = emptyStringSHA256
+ } else {
+ hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
+ }
+ if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {
+ ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
+ }
+ }
+ ctx.bodyDigest = hash
+}
+
+// isRequestSigned returns whether the request is currently signed or presigned
+func (ctx *signingCtx) isRequestSigned() bool {
+ if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
+ return true
+ }
+ if ctx.Request.Header.Get("Authorization") != "" {
+ return true
+ }
+
+ return false
+}
+
+// removePresign removes the presign signing values from the request's query string.
+func (ctx *signingCtx) removePresign() {
+ ctx.Query.Del("X-Amz-Algorithm")
+ ctx.Query.Del("X-Amz-Signature")
+ ctx.Query.Del("X-Amz-Security-Token")
+ ctx.Query.Del("X-Amz-Date")
+ ctx.Query.Del("X-Amz-Expires")
+ ctx.Query.Del("X-Amz-Credential")
+ ctx.Query.Del("X-Amz-SignedHeaders")
+}
+
+func makeHmac(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+func makeSha256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+func makeSha256Reader(reader io.ReadSeeker) []byte {
+ hash := sha256.New()
+ start, _ := reader.Seek(0, 1)
+ defer reader.Seek(start, 0)
+
+ io.Copy(hash, reader)
+ return hash.Sum(nil)
+}
+
+const doubleSpaces = "  " // two consecutive space characters
+
+var doubleSpaceBytes = []byte(doubleSpaces)
+
+func stripExcessSpaces(headerVals []string) []string {
+ vals := make([]string, len(headerVals))
+ for i, str := range headerVals {
+ // Trim leading and trailing spaces
+ trimmed := strings.TrimSpace(str)
+
+ idx := strings.Index(trimmed, doubleSpaces)
+ var buf []byte
+ for idx > -1 {
+ // Multiple adjacent spaces found
+ if buf == nil {
+ // first time create the buffer
+ buf = []byte(trimmed)
+ }
+
+ stripToIdx := -1
+ for j := idx + 1; j < len(buf); j++ {
+ if buf[j] != ' ' {
+ buf = append(buf[:idx+1], buf[j:]...)
+ // Continue the search after the collapsed space; the first
+ // non-space byte now sits at idx+1 (indexing from j could run
+ // past the end of the shortened buf).
+ stripToIdx = idx + 1
+ break
+ }
+ }
+
+ if stripToIdx >= 0 {
+ idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes)
+ if idx >= 0 {
+ idx += stripToIdx
+ }
+ } else {
+ idx = -1
+ }
+ }
+
+ if buf != nil {
+ vals[i] = string(buf)
+ } else {
+ vals[i] = trimmed
+ }
+ }
+ return vals
+}
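+// For example, stripExcessSpaces([]string{" a   b ", "c"}) yields
+// []string{"a b", "c"}: outer spaces are trimmed and interior runs of spaces
+// collapse to a single space.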
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644
index 00000000..0e2d864e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,118 @@
+package aws
+
+import (
+ "io"
+ "sync"
+)
+
+// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. Should
+// only be used with an io.Reader that is also an io.Seeker. Failing to do so
+// may cause request signature errors, or request bodies not being sent for
+// GET, HEAD and DELETE HTTP methods.
+//
+// Deprecated: Should only be used with io.ReadSeeker. If using for
+// S3 PutObject to stream content use s3manager.Uploader instead.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+ return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+ r io.Reader
+}
+
+// Read reads from the reader up to size of p. The number of bytes read, and
+// error if it occurred, will be returned.
+//
+// If the underlying reader is not set, zero bytes read and a nil error will
+// be returned.
+//
+// Performs the same functionality as io.Reader Read
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+ switch t := r.r.(type) {
+ case io.Reader:
+ return t.Read(p)
+ }
+ return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+ switch t := r.r.(type) {
+ case io.Seeker:
+ return t.Seek(offset, whence)
+ }
+ return int64(0), nil
+}
+
+// IsSeeker returns whether the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+ _, ok := r.r.(io.Seeker)
+ return ok
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+ switch t := r.r.(type) {
+ case io.Closer:
+ return t.Close()
+ }
+ return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt
+// interface. Can be used with the s3manager.Downloader to download content to
+// a buffer in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+ buf []byte
+ m sync.Mutex
+
+ // GrowthCoeff defines the growth rate of the internal buffer. By
+ // default, the growth rate is 1, where expanding the internal
+ // buffer will allocate only enough capacity to fit the new expected
+ // length.
+ GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+ return &WriteAtBuffer{buf: buf}
+}
+
+// WriteAt writes a slice of bytes to the buffer starting at the position
+// provided. The number of bytes written, or an error, will be returned.
+// Previously written slices can be overwritten if the writes overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+ pLen := len(p)
+ expLen := pos + int64(pLen)
+ b.m.Lock()
+ defer b.m.Unlock()
+ if int64(len(b.buf)) < expLen {
+ if int64(cap(b.buf)) < expLen {
+ if b.GrowthCoeff < 1 {
+ b.GrowthCoeff = 1
+ }
+ newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+ copy(newBuf, b.buf)
+ b.buf = newBuf
+ }
+ b.buf = b.buf[:expLen]
+ }
+ copy(b.buf[pos:], p)
+ return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.buf
+}
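+// A brief usage sketch:
+//
+// buf := aws.NewWriteAtBuffer([]byte{})
+// buf.WriteAt([]byte("world"), 6)
+// buf.WriteAt([]byte("hello"), 0)
+// // buf.Bytes() is now "hello\x00world"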
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go
new file mode 100644
index 00000000..6192b245
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go
@@ -0,0 +1,12 @@
+// +build go1.8
+
+package aws
+
+import "net/url"
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
+func URLHostname(url *url.URL) string {
+ return url.Hostname()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
new file mode 100644
index 00000000..0210d272
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
@@ -0,0 +1,29 @@
+// +build !go1.8
+
+package aws
+
+import (
+ "net/url"
+ "strings"
+)
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Copy of Go 1.8's net/url#URL.Hostname functionality.
+func URLHostname(url *url.URL) string {
+ return stripPort(url.Host)
+}
+
+// stripPort is a copy of the Go 1.8 url#URL.Hostname functionality.
+// https://golang.org/src/net/url/url.go
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+ return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
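+// For example, stripPort("example.com:443") returns "example.com" and
+// stripPort("[::1]:8080") returns "::1".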
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644
index 00000000..eeb6467a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.8.19"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/README.md b/vendor/github.com/aws/aws-sdk-go/private/README.md
new file mode 100644
index 00000000..5bdb4c50
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/README.md
@@ -0,0 +1,4 @@
+## AWS SDK for Go Private packages ##
+`private` is a collection of packages used internally by the SDK, and is subject to breaking changes. These packages are not placed under `internal` so that, if you really need to use their functionality and understand that breaking changes will be made, you are able to.
+
+These packages will be refactored in the future so that the API generator and model parsers are exposed cleanly on their own, making it easier for you to generate your own code based on the API models.
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
new file mode 100644
index 00000000..53831dff
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+ "crypto/rand"
+ "fmt"
+ "reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. It is exported for testing, and should not be used otherwise.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an idempotency token.
+//
+// Only *string and string type fields that are tagged with idempotencyToken,
+// and are not already set, can be auto-filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+ switch u := v.Interface().(type) {
+ // To auto fill an Idempotency token the field must be a string,
+ // tagged for auto fill, and have a zero value.
+ case *string:
+ return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+ case string:
+ return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+ }
+
+ return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+ b := make([]byte, 16)
+ RandReader.Read(b)
+
+ return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the value provided with an idempotency token,
+// given that the value can be set. It will panic if the value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() && v.CanSet() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ v = reflect.Indirect(v)
+
+ if !v.CanSet() {
+		panic(fmt.Sprintf("unable to set idempotency token %v", v))
+ }
+
+ b := make([]byte, 16)
+ _, err := rand.Read(b)
+ if err != nil {
+ // TODO handle error
+ return
+ }
+
+ v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
+func UUIDVersion4(u []byte) string {
+ // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+ // 13th character is "4"
+ u[6] = (u[6] | 0x40) & 0x4F
+ // 17th character is "8", "9", "a", or "b"
+ u[8] = (u[8] | 0x80) & 0xBF
+
+ return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
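+
+// Example: a brief sketch of how the helpers above fit together (the output
+// value is illustrative; tokens are random):
+//
+//    token := GetIdempotencyToken()
+//    // token is a UUID version 4 string such as
+//    // "6BA7B810-9DAD-41D1-80B4-00C04FD430C8"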
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
new file mode 100644
index 00000000..18169f0f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -0,0 +1,36 @@
+// Package query provides serialization of AWS query requests and responses.
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
+
+import (
+ "net/url"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
+)
+
+// BuildHandler is a named request handler for building query protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build}
+
+// Build builds a request for an AWS Query service.
+func Build(r *request.Request) {
+ body := url.Values{
+ "Action": {r.Operation.Name},
+ "Version": {r.ClientInfo.APIVersion},
+ }
+ if err := queryutil.Parse(body, r.Params, false); err != nil {
+ r.Error = awserr.New("SerializationError", "failed encoding Query request", err)
+ return
+ }
+
+ if r.ExpireTime == 0 {
+ r.HTTPRequest.Method = "POST"
+ r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
+ r.SetBufferBody([]byte(body.Encode()))
+ } else { // This is a pre-signed request
+ r.HTTPRequest.Method = "GET"
+ r.HTTPRequest.URL.RawQuery = body.Encode()
+ }
+}
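+
+// Example: for an operation named "DescribeInstances" with API version
+// "2016-11-15" and no additional parameters, the body built above encodes to
+// (illustrative only):
+//
+//    Action=DescribeInstances&Version=2016-11-15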
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
new file mode 100644
index 00000000..524ca952
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -0,0 +1,237 @@
+package queryutil
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/url"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// Parse parses an object i and fills a url.Values object. The isEC2 flag
+// indicates if this is the EC2 Query sub-protocol.
+func Parse(body url.Values, i interface{}, isEC2 bool) error {
+ q := queryParser{isEC2: isEC2}
+ return q.parseValue(body, reflect.ValueOf(i), "", "")
+}
+
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+type queryParser struct {
+ isEC2 bool
+}
+
+func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ value = elemOf(value)
+
+ // no need to handle zero values
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ return q.parseStruct(v, value, prefix)
+ case "list":
+ return q.parseList(v, value, prefix, tag)
+ case "map":
+ return q.parseMap(v, value, prefix, tag)
+ default:
+ return q.parseScalar(v, value, prefix, tag)
+ }
+}
+
+func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ elemValue := elemOf(value.Field(i))
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+ token := protocol.GetIdempotencyToken()
+ elemValue = reflect.ValueOf(token)
+ }
+
+ var name string
+ if q.isEC2 {
+ name = field.Tag.Get("queryName")
+ }
+ if name == "" {
+ if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+ name = field.Tag.Get("locationNameList")
+ } else if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+ if name != "" && q.isEC2 {
+ name = strings.ToUpper(name[0:1]) + name[1:]
+ }
+ }
+ if name == "" {
+ name = field.Name
+ }
+
+ if prefix != "" {
+ name = prefix + "." + name
+ }
+
+ if err := q.parseValue(v, elemValue, name, field.Tag); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ // If it's empty, generate an empty value
+ if !value.IsNil() && value.Len() == 0 {
+ v.Set(prefix, "")
+ return nil
+ }
+
+ // check for unflattened list member
+ if !q.isEC2 && tag.Get("flattened") == "" {
+ if listName := tag.Get("locationNameList"); listName == "" {
+ prefix += ".member"
+ } else {
+ prefix += "." + listName
+ }
+ }
+
+ for i := 0; i < value.Len(); i++ {
+ slicePrefix := prefix
+ if slicePrefix == "" {
+ slicePrefix = strconv.Itoa(i + 1)
+ } else {
+ slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
+ }
+ if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ // If it's empty, generate an empty value
+ if !value.IsNil() && value.Len() == 0 {
+ v.Set(prefix, "")
+ return nil
+ }
+
+	// check for unflattened map entry
+ if !q.isEC2 && tag.Get("flattened") == "" {
+ prefix += ".entry"
+ }
+
+ // sort keys for improved serialization consistency.
+ // this is not strictly necessary for protocol support.
+ mapKeyValues := value.MapKeys()
+ mapKeys := map[string]reflect.Value{}
+ mapKeyNames := make([]string, len(mapKeyValues))
+ for i, mapKey := range mapKeyValues {
+ name := mapKey.String()
+ mapKeys[name] = mapKey
+ mapKeyNames[i] = name
+ }
+ sort.Strings(mapKeyNames)
+
+ for i, mapKeyName := range mapKeyNames {
+ mapKey := mapKeys[mapKeyName]
+ mapValue := value.MapIndex(mapKey)
+
+ kname := tag.Get("locationNameKey")
+ if kname == "" {
+ kname = "key"
+ }
+ vname := tag.Get("locationNameValue")
+ if vname == "" {
+ vname = "value"
+ }
+
+ // serialize key
+ var keyName string
+ if prefix == "" {
+ keyName = strconv.Itoa(i+1) + "." + kname
+ } else {
+ keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
+ }
+
+ if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
+ return err
+ }
+
+ // serialize value
+ var valueName string
+ if prefix == "" {
+ valueName = strconv.Itoa(i+1) + "." + vname
+ } else {
+ valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
+ }
+
+ if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
+ switch value := r.Interface().(type) {
+ case string:
+ v.Set(name, value)
+ case []byte:
+ if !r.IsNil() {
+ v.Set(name, base64.StdEncoding.EncodeToString(value))
+ }
+ case bool:
+ v.Set(name, strconv.FormatBool(value))
+ case int64:
+ v.Set(name, strconv.FormatInt(value, 10))
+ case int:
+ v.Set(name, strconv.Itoa(value))
+ case float64:
+ v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
+ case float32:
+ v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
+ case time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ v.Set(name, value.UTC().Format(ISO8601UTC))
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
+ }
+ return nil
+}
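+
+// Example: a rough sketch of how Parse flattens a nested value into query
+// parameters (the struct and its tags are hypothetical placeholders):
+//
+//    type input struct {
+//        Names []*string `type:"list"`
+//    }
+//    body := url.Values{}
+//    _ = Parse(body, &input{Names: []*string{aws.String("a"), aws.String("b")}}, false)
+//    // body now contains:
+//    //    Names.member.1=a
+//    //    Names.member.2=b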
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
new file mode 100644
index 00000000..e0f4d5a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
@@ -0,0 +1,35 @@
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
+
+import (
+ "encoding/xml"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling query protocol responses
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol response metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals a response for an AWS Query service.
+func Unmarshal(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ if r.DataFilled() {
+ decoder := xml.NewDecoder(r.HTTPResponse.Body)
+ err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed decoding Query response", err)
+ return
+ }
+ }
+}
+
+// UnmarshalMeta unmarshals header response values for an AWS Query service.
+func UnmarshalMeta(r *request.Request) {
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
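+
+// Example: for an operation named "DescribeInstances", Unmarshal above decodes
+// the wrapped <DescribeInstancesResult> element from a response shaped roughly
+// like this (illustrative only):
+//
+//    <DescribeInstancesResponse>
+//        <DescribeInstancesResult>...</DescribeInstancesResult>
+//        <ResponseMetadata><RequestId>...</RequestId></ResponseMetadata>
+//    </DescribeInstancesResponse>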
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644
index 00000000..f2142961
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,66 @@
+package query
+
+import (
+ "encoding/xml"
+ "io/ioutil"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+type xmlErrorResponse struct {
+ XMLName xml.Name `xml:"ErrorResponse"`
+ Code string `xml:"Error>Code"`
+ Message string `xml:"Error>Message"`
+ RequestID string `xml:"RequestId"`
+}
+
+type xmlServiceUnavailableResponse struct {
+ XMLName xml.Name `xml:"ServiceUnavailableException"`
+}
+
+// UnmarshalErrorHandler is a named request handler to unmarshal response errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err)
+ return
+ }
+
+ // First check for specific error
+ resp := xmlErrorResponse{}
+ decodeErr := xml.Unmarshal(bodyBytes, &resp)
+ if decodeErr == nil {
+ reqID := resp.RequestID
+ if reqID == "" {
+ reqID = r.RequestID
+ }
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(resp.Code, resp.Message, nil),
+ r.HTTPResponse.StatusCode,
+ reqID,
+ )
+ return
+ }
+
+ // Check for unhandled error
+ servUnavailResp := xmlServiceUnavailableResponse{}
+ unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
+ if unavailErr == nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("ServiceUnavailableException", "service is unavailable", nil),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ // Failed to retrieve any error message from the response body
+ r.Error = awserr.New("SerializationError",
+ "failed to decode query XML error response", decodeErr)
+}
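+
+// Example: the kind of XML body the first decode above expects (illustrative):
+//
+//    <ErrorResponse>
+//        <Error>
+//            <Code>InvalidParameterValue</Code>
+//            <Message>The parameter value is invalid</Message>
+//        </Error>
+//        <RequestId>example-request-id</RequestId>
+//    </ErrorResponse>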
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
new file mode 100644
index 00000000..71618356
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,290 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
+package rest
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RFC822 returns an RFC822 formatted timestamp for AWS protocols
+const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+var errValueNotSet = fmt.Errorf("value not set")
+
+func init() {
+ for i := 0; i < len(noEscape); i++ {
+ // AWS expects every character except these to be escaped
+ noEscape[i] = (i >= 'A' && i <= 'Z') ||
+ (i >= 'a' && i <= 'z') ||
+ (i >= '0' && i <= '9') ||
+ i == '-' ||
+ i == '.' ||
+ i == '_' ||
+ i == '~'
+ }
+}
+
+// BuildHandler is a named request handler for building rest protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
+
+// Build builds the REST component of a service request.
+func Build(r *request.Request) {
+ if r.ParamsFilled() {
+ v := reflect.ValueOf(r.Params).Elem()
+ buildLocationElements(r, v, false)
+ buildBody(r, v)
+ }
+}
+
+// BuildAsGET builds the REST component of a service request with the ability to hoist
+// data from the body.
+func BuildAsGET(r *request.Request) {
+ if r.ParamsFilled() {
+ v := reflect.ValueOf(r.Params).Elem()
+ buildLocationElements(r, v, true)
+ buildBody(r, v)
+ }
+}
+
+func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
+ query := r.HTTPRequest.URL.Query()
+
+	// Set up the raw path to match the base path pattern. This is needed
+	// so that when the path is mutated, a custom escaped version can be
+	// stored in RawPath and used by the Go client.
+ r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
+
+ for i := 0; i < v.NumField(); i++ {
+ m := v.Field(i)
+ if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
+ continue
+ }
+
+ if m.IsValid() {
+ field := v.Type().Field(i)
+ name := field.Tag.Get("locationName")
+ if name == "" {
+ name = field.Name
+ }
+ if kind := m.Kind(); kind == reflect.Ptr {
+ m = m.Elem()
+ } else if kind == reflect.Interface {
+ if !m.Elem().IsValid() {
+ continue
+ }
+ }
+ if !m.IsValid() {
+ continue
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ var err error
+ switch field.Tag.Get("location") {
+ case "headers": // header maps
+ err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag)
+ case "header":
+ err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag)
+ case "uri":
+ err = buildURI(r.HTTPRequest.URL, m, name, field.Tag)
+ case "querystring":
+ err = buildQueryString(query, m, name, field.Tag)
+ default:
+ if buildGETQuery {
+ err = buildQueryString(query, m, name, field.Tag)
+ }
+ }
+ r.Error = err
+ }
+ if r.Error != nil {
+ return
+ }
+ }
+
+ r.HTTPRequest.URL.RawQuery = query.Encode()
+ if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) {
+ cleanPath(r.HTTPRequest.URL)
+ }
+}
+
+func buildBody(r *request.Request, v reflect.Value) {
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ pfield, _ := v.Type().FieldByName(payloadName)
+ if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+ payload := reflect.Indirect(v.FieldByName(payloadName))
+ if payload.IsValid() && payload.Interface() != nil {
+ switch reader := payload.Interface().(type) {
+ case io.ReadSeeker:
+ r.SetReaderBody(reader)
+ case []byte:
+ r.SetBufferBody(reader)
+ case string:
+ r.SetStringBody(reader)
+ default:
+ r.Error = awserr.New("SerializationError",
+ "failed to encode REST request",
+ fmt.Errorf("unknown payload type %s", payload.Type()))
+ }
+ }
+ }
+ }
+ }
+}
+
+func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error {
+ str, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+ }
+
+ header.Add(name, str)
+
+ return nil
+}
+
+func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error {
+ prefix := tag.Get("locationName")
+ for _, key := range v.MapKeys() {
+ str, err := convertType(v.MapIndex(key), tag)
+ if err == errValueNotSet {
+ continue
+ } else if err != nil {
+			return awserr.New("SerializationError", "failed to encode REST request", err)
+		}
+
+ header.Add(prefix+key.String(), str)
+ }
+ return nil
+}
+
+func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error {
+ value, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+ }
+
+ u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1)
+ u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1)
+
+ u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1)
+ u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1)
+
+ return nil
+}
+
+func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error {
+ switch value := v.Interface().(type) {
+ case []*string:
+ for _, item := range value {
+ query.Add(name, *item)
+ }
+ case map[string]*string:
+ for key, item := range value {
+ query.Add(key, *item)
+ }
+ case map[string][]*string:
+ for key, items := range value {
+ for _, item := range items {
+ query.Add(key, *item)
+ }
+ }
+ default:
+ str, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+ }
+ query.Set(name, str)
+ }
+
+ return nil
+}
+
+func cleanPath(u *url.URL) {
+ hasSlash := strings.HasSuffix(u.Path, "/")
+
+ // clean up path, removing duplicate `/`
+ u.Path = path.Clean(u.Path)
+ u.RawPath = path.Clean(u.RawPath)
+
+ if hasSlash && !strings.HasSuffix(u.Path, "/") {
+ u.Path += "/"
+ u.RawPath += "/"
+ }
+}
+
+// EscapePath escapes part of a URL path in Amazon style
+func EscapePath(path string, encodeSep bool) string {
+ var buf bytes.Buffer
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if noEscape[c] || (c == '/' && !encodeSep) {
+ buf.WriteByte(c)
+ } else {
+ fmt.Fprintf(&buf, "%%%02X", c)
+ }
+ }
+ return buf.String()
+}
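+
+// Example: a quick sketch of EscapePath behavior (values illustrative):
+//
+//    EscapePath("a b/c", true)  // "a%20b%2Fc"
+//    EscapePath("a b/c", false) // "a%20b/c"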
+
+func convertType(v reflect.Value, tag reflect.StructTag) (string, error) {
+ v = reflect.Indirect(v)
+ if !v.IsValid() {
+ return "", errValueNotSet
+ }
+
+ var str string
+ switch value := v.Interface().(type) {
+ case string:
+ str = value
+ case []byte:
+ str = base64.StdEncoding.EncodeToString(value)
+ case bool:
+ str = strconv.FormatBool(value)
+ case int64:
+ str = strconv.FormatInt(value, 10)
+ case float64:
+ str = strconv.FormatFloat(value, 'f', -1, 64)
+ case time.Time:
+ str = value.UTC().Format(RFC822)
+ case aws.JSONValue:
+ b, err := json.Marshal(value)
+ if err != nil {
+ return "", err
+ }
+ if tag.Get("location") == "header" {
+ str = base64.StdEncoding.EncodeToString(b)
+ } else {
+ str = string(b)
+ }
+ default:
+		err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return "", err
+ }
+ return str, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
new file mode 100644
index 00000000..4366de2e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
@@ -0,0 +1,45 @@
+package rest
+
+import "reflect"
+
+// PayloadMember returns the payload field member of i if there is one, or nil.
+func PayloadMember(i interface{}) interface{} {
+ if i == nil {
+ return nil
+ }
+
+ v := reflect.ValueOf(i).Elem()
+ if !v.IsValid() {
+ return nil
+ }
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ field, _ := v.Type().FieldByName(payloadName)
+ if field.Tag.Get("type") != "structure" {
+ return nil
+ }
+
+ payload := v.FieldByName(payloadName)
+ if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
+ return payload.Interface()
+ }
+ }
+ }
+ return nil
+}
+
+// PayloadType returns the type of a payload field member of i if there is one, or "".
+func PayloadType(i interface{}) string {
+ v := reflect.Indirect(reflect.ValueOf(i))
+ if !v.IsValid() {
+ return ""
+ }
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ if member, ok := v.Type().FieldByName(payloadName); ok {
+ return member.Tag.Get("type")
+ }
+ }
+ }
+ return ""
+}
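+
+// Example: a brief sketch of these helpers against a hypothetical shape (the
+// struct and its tags are placeholders, not a real API type):
+//
+//    type output struct {
+//        _    struct{}      `payload:"Body"`
+//        Body io.ReadCloser `type:"blob"`
+//    }
+//    PayloadType(&output{})   // "blob"
+//    PayloadMember(&output{}) // nil, because the payload is not a structure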
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
new file mode 100644
index 00000000..7a779ee2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
@@ -0,0 +1,227 @@
+package rest
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling rest protocol responses
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol response metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals the REST component of a response in a REST service.
+func Unmarshal(r *request.Request) {
+ if r.DataFilled() {
+ v := reflect.Indirect(reflect.ValueOf(r.Data))
+ unmarshalBody(r, v)
+ }
+}
+
+// UnmarshalMeta unmarshals the REST metadata of a response in a REST service
+func UnmarshalMeta(r *request.Request) {
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+ if r.RequestID == "" {
+ // Alternative version of request id in the header
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
+ }
+ if r.DataFilled() {
+ v := reflect.Indirect(reflect.ValueOf(r.Data))
+ unmarshalLocationElements(r, v)
+ }
+}
+
+func unmarshalBody(r *request.Request, v reflect.Value) {
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ pfield, _ := v.Type().FieldByName(payloadName)
+ if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+ payload := v.FieldByName(payloadName)
+ if payload.IsValid() {
+ switch payload.Interface().(type) {
+ case []byte:
+ defer r.HTTPResponse.Body.Close()
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ } else {
+ payload.Set(reflect.ValueOf(b))
+ }
+ case *string:
+ defer r.HTTPResponse.Body.Close()
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ } else {
+ str := string(b)
+ payload.Set(reflect.ValueOf(&str))
+ }
+ default:
+ switch payload.Type().String() {
+ case "io.ReadCloser":
+ payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
+ case "io.ReadSeeker":
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError",
+ "failed to read response body", err)
+ return
+ }
+ payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b))))
+ default:
+ io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ defer r.HTTPResponse.Body.Close()
+ r.Error = awserr.New("SerializationError",
+ "failed to decode REST response",
+ fmt.Errorf("unknown payload type %s", payload.Type()))
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func unmarshalLocationElements(r *request.Request, v reflect.Value) {
+ for i := 0; i < v.NumField(); i++ {
+ m, field := v.Field(i), v.Type().Field(i)
+ if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
+ continue
+ }
+
+ if m.IsValid() {
+ name := field.Tag.Get("locationName")
+ if name == "" {
+ name = field.Name
+ }
+
+ switch field.Tag.Get("location") {
+ case "statusCode":
+ unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
+ case "header":
+ err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ break
+ }
+ case "headers":
+ prefix := field.Tag.Get("locationName")
+ err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ break
+ }
+ }
+ }
+ if r.Error != nil {
+ return
+ }
+ }
+}
+
+func unmarshalStatusCode(v reflect.Value, statusCode int) {
+ if !v.IsValid() {
+ return
+ }
+
+ switch v.Interface().(type) {
+ case *int64:
+ s := int64(statusCode)
+ v.Set(reflect.ValueOf(&s))
+ }
+}
+
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
+ switch r.Interface().(type) {
+ case map[string]*string: // we only support string map value types
+ out := map[string]*string{}
+ for k, v := range headers {
+ k = http.CanonicalHeaderKey(k)
+ if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+ out[k[len(prefix):]] = &v[0]
+ }
+ }
+ r.Set(reflect.ValueOf(out))
+ }
+ return nil
+}
+
+func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error {
+ isJSONValue := tag.Get("type") == "jsonvalue"
+ if isJSONValue {
+ if len(header) == 0 {
+ return nil
+ }
+ } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+ return nil
+ }
+
+ switch v.Interface().(type) {
+ case *string:
+ v.Set(reflect.ValueOf(&header))
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&b))
+ case *bool:
+ b, err := strconv.ParseBool(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&b))
+ case *int64:
+ i, err := strconv.ParseInt(header, 10, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&i))
+ case *float64:
+ f, err := strconv.ParseFloat(header, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&f))
+ case *time.Time:
+ t, err := time.Parse(RFC822, header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&t))
+ case aws.JSONValue:
+ b := []byte(header)
+ var err error
+ if tag.Get("location") == "header" {
+ b, err = base64.StdEncoding.DecodeString(header)
+ if err != nil {
+ return err
+ }
+ }
+
+ m := aws.JSONValue{}
+ err = json.Unmarshal(b, &m)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(m))
+ default:
+		err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return err
+ }
+ return nil
+}
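+
+// Example: a small sketch of unmarshalHeader converting a header string into a
+// typed field (hypothetical values):
+//
+//    var count *int64
+//    v := reflect.ValueOf(&count).Elem()
+//    _ = unmarshalHeader(v, "42", "") // count now points to int64(42)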
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
new file mode 100644
index 00000000..7bdf4c85
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
@@ -0,0 +1,69 @@
+// Package restxml provides RESTful XML serialization of AWS
+// requests and responses.
+package restxml
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
+
+import (
+ "bytes"
+ "encoding/xml"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// BuildHandler is a named request handler for building restxml protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build}
+
+// UnmarshalHandler is a named request handler for unmarshaling restxml protocol responses
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol response metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol response errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError}
+
+// Build builds a request payload for the REST XML protocol.
+func Build(r *request.Request) {
+ rest.Build(r)
+
+ if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+ var buf bytes.Buffer
+ err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err)
+ return
+ }
+ r.SetBufferBody(buf.Bytes())
+ }
+}
+
+// Unmarshal unmarshals a payload response for the REST XML protocol.
+func Unmarshal(r *request.Request) {
+ if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
+ defer r.HTTPResponse.Body.Close()
+ decoder := xml.NewDecoder(r.HTTPResponse.Body)
+ err := xmlutil.UnmarshalXML(r.Data, decoder, "")
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err)
+ return
+ }
+ } else {
+ rest.Unmarshal(r)
+ }
+}
+
+// UnmarshalMeta unmarshals response headers for the REST XML protocol.
+func UnmarshalMeta(r *request.Request) {
+ rest.UnmarshalMeta(r)
+}
+
+// UnmarshalError unmarshals a response error for the REST XML protocol.
+func UnmarshalError(r *request.Request) {
+ query.UnmarshalError(r)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
new file mode 100644
index 00000000..da1a6811
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
@@ -0,0 +1,21 @@
+package protocol
+
+import (
+ "io"
+ "io/ioutil"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
+var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
+
+// UnmarshalDiscardBody is a request handler to empty a response's body and close it.
+func UnmarshalDiscardBody(r *request.Request) {
+ if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
+ return
+ }
+
+ io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ r.HTTPResponse.Body.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
new file mode 100644
index 00000000..7091b456
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -0,0 +1,296 @@
+// Package xmlutil provides XML serialization of AWS requests and responses.
+package xmlutil
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// BuildXML will serialize params into an xml.Encoder.
+// An error will be returned if the serialization of any of the params or nested values fails.
+func BuildXML(params interface{}, e *xml.Encoder) error {
+ b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
+ root := NewXMLElement(xml.Name{})
+ if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
+ return err
+ }
+ for _, c := range root.Children {
+ for _, v := range c {
+ return StructToXML(e, v, false)
+ }
+ }
+ return nil
+}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+// An xmlBuilder serializes values from Go code to XML
+type xmlBuilder struct {
+ encoder *xml.Encoder
+ namespaces map[string]string
+}
+
+// buildValue is a generic XMLNode builder for any type. It builds the value
+// according to its specific type: struct, list, map, or scalar.
+//
+// It also takes a "type" tag value to set what type a value should be converted
+// to an XMLNode as. If the type is not provided, reflection will be used to
+// determine the value's type.
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ value = elemOf(value)
+ if !value.IsValid() { // no need to handle zero values
+ return nil
+ } else if tag.Get("location") != "" { // don't handle non-body location values
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := value.Type().FieldByName("_"); ok {
+ tag = tag + reflect.StructTag(" ") + field.Tag
+ }
+ return b.buildStruct(value, current, tag)
+ case "list":
+ return b.buildList(value, current, tag)
+ case "map":
+ return b.buildMap(value, current, tag)
+ default:
+ return b.buildScalar(value, current, tag)
+ }
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
+// types are converted to XMLNodes also.
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ fieldAdded := false
+
+ // unwrap payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := value.Type().FieldByName(payload)
+ tag = field.Tag
+ value = elemOf(value.FieldByName(payload))
+
+ if !value.IsValid() {
+ return nil
+ }
+ }
+
+ child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+
+ // there is an xmlNamespace associated with this struct
+ if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
+ ns := xml.Attr{
+ Name: xml.Name{Local: "xmlns"},
+ Value: uri,
+ }
+ if prefix != "" {
+ b.namespaces[prefix] = uri // register the namespace
+ ns.Name.Local = "xmlns:" + prefix
+ }
+
+ child.Attr = append(child.Attr, ns)
+ }
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ member := elemOf(value.Field(i))
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ mTag := field.Tag
+ if mTag.Get("location") != "" { // skip non-body members
+ continue
+ }
+
+ if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+ token := protocol.GetIdempotencyToken()
+ member = reflect.ValueOf(token)
+ }
+
+ memberName := mTag.Get("locationName")
+ if memberName == "" {
+ memberName = field.Name
+ mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
+ }
+ if err := b.buildValue(member, child, mTag); err != nil {
+ return err
+ }
+
+ fieldAdded = true
+ }
+
+	if fieldAdded { // only append this child if we have one or more valid members
+ current.AddChild(child)
+ }
+
+ return nil
+}
+
+// buildList adds the value's list items to the current XMLNode as child nodes. All
+// nested values in the list are converted to XMLNodes also.
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if value.IsNil() { // don't build omitted lists
+ return nil
+ }
+
+ // check for unflattened list member
+ flattened := tag.Get("flattened") != ""
+
+ xname := xml.Name{Local: tag.Get("locationName")}
+ if flattened {
+ for i := 0; i < value.Len(); i++ {
+ child := NewXMLElement(xname)
+ current.AddChild(child)
+ if err := b.buildValue(value.Index(i), child, ""); err != nil {
+ return err
+ }
+ }
+ } else {
+ list := NewXMLElement(xname)
+ current.AddChild(list)
+
+ for i := 0; i < value.Len(); i++ {
+ iname := tag.Get("locationNameList")
+ if iname == "" {
+ iname = "member"
+ }
+
+ child := NewXMLElement(xml.Name{Local: iname})
+ list.AddChild(child)
+ if err := b.buildValue(value.Index(i), child, ""); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// buildMap adds the value's key/value pairs to the current XMLNode as child nodes. All
+// nested values in the map are converted to XMLNodes also.
+//
+// An error will be returned if it is unable to build the map's values into XMLNodes.
+func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if value.IsNil() { // don't build omitted maps
+ return nil
+ }
+
+ maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+ current.AddChild(maproot)
+ current = maproot
+
+ kname, vname := "key", "value"
+ if n := tag.Get("locationNameKey"); n != "" {
+ kname = n
+ }
+ if n := tag.Get("locationNameValue"); n != "" {
+ vname = n
+ }
+
+ // sorting is not required for compliance, but it makes testing easier
+ keys := make([]string, value.Len())
+ for i, k := range value.MapKeys() {
+ keys[i] = k.String()
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := value.MapIndex(reflect.ValueOf(k))
+
+ mapcur := current
+ if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
+ child := NewXMLElement(xml.Name{Local: "entry"})
+ mapcur.AddChild(child)
+ mapcur = child
+ }
+
+ kchild := NewXMLElement(xml.Name{Local: kname})
+ kchild.Text = k
+ vchild := NewXMLElement(xml.Name{Local: vname})
+ mapcur.AddChild(kchild)
+ mapcur.AddChild(vchild)
+
+ if err := b.buildValue(v, vchild, ""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// buildScalar will convert the value into a string and append it as an attribute or child
+// of the current XMLNode.
+//
+// The value will be added as an attribute if the tag contains an "xmlAttribute" attribute value.
+//
+// An error will be returned if the value type is unsupported.
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ var str string
+ switch converted := value.Interface().(type) {
+ case string:
+ str = converted
+ case []byte:
+ if !value.IsNil() {
+ str = base64.StdEncoding.EncodeToString(converted)
+ }
+ case bool:
+ str = strconv.FormatBool(converted)
+ case int64:
+ str = strconv.FormatInt(converted, 10)
+ case int:
+ str = strconv.Itoa(converted)
+ case float64:
+ str = strconv.FormatFloat(converted, 'f', -1, 64)
+ case float32:
+ str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
+ case time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ str = converted.UTC().Format(ISO8601UTC)
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)",
+ tag.Get("locationName"), value.Interface(), value.Type().Name())
+ }
+
+ xname := xml.Name{Local: tag.Get("locationName")}
+ if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
+ attr := xml.Attr{Name: xname, Value: str}
+ current.Attr = append(current.Attr, attr)
+ } else { // regular text node
+ current.AddChild(&XMLNode{Name: xname, Text: str})
+ }
+ return nil
+}
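+
+// Example: a rough sketch of BuildXML output for a hypothetical shape (the
+// struct and its tags are placeholders):
+//
+//    type createInput struct {
+//        _    struct{} `locationName:"CreateRequest" type:"structure"`
+//        Name *string  `locationName:"Name" type:"string"`
+//    }
+//    var buf bytes.Buffer
+//    _ = BuildXML(&createInput{Name: aws.String("test")}, xml.NewEncoder(&buf))
+//    // buf.String() == "<CreateRequest><Name>test</Name></CreateRequest>"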
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
new file mode 100644
index 00000000..87584628
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
@@ -0,0 +1,260 @@
+package xmlutil
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// UnmarshalXML deserializes an xml.Decoder into the container v. The value v
+// needs to match the shape of the XML expected to be decoded.
+// If the shape doesn't match, unmarshaling will fail.
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
+ n, err := XMLToStruct(d, nil)
+ if err != nil {
+ return err
+ }
+ if n.Children != nil {
+ for _, root := range n.Children {
+ for _, c := range root {
+ if wrappedChild, ok := c.Children[wrapper]; ok {
+ c = wrappedChild[0] // pull out wrapped element
+ }
+
+ err = parse(reflect.ValueOf(v), c, "")
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ }
+ }
+ return nil
+ }
+ return nil
+}
+
+// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or
+// reflection will be used to determine the type from r.
+func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ rtype := r.Type()
+ if rtype.Kind() == reflect.Ptr {
+ rtype = rtype.Elem() // check kind of actual element type
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch rtype.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := rtype.FieldByName("_"); ok {
+ tag = field.Tag
+ }
+ return parseStruct(r, node, tag)
+ case "list":
+ return parseList(r, node, tag)
+ case "map":
+ return parseMap(r, node, tag)
+ default:
+ return parseScalar(r, node, tag)
+ }
+}
+
+// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
+// types in the structure will also be deserialized.
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ t := r.Type()
+ if r.Kind() == reflect.Ptr {
+ if r.IsNil() { // create the structure if it's nil
+ s := reflect.New(r.Type().Elem())
+ r.Set(s)
+ r = s
+ }
+
+ r = r.Elem()
+ t = t.Elem()
+ }
+
+ // unwrap any payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := t.FieldByName(payload)
+ return parseStruct(r.FieldByName(payload), node, field.Tag)
+ }
+
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ if c := field.Name[0:1]; strings.ToLower(c) == c {
+ continue // ignore unexported fields
+ }
+
+ // figure out what this field is called
+ name := field.Name
+ if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+ name = field.Tag.Get("locationNameList")
+ } else if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+
+ // try to find the field by name in elements
+ elems := node.Children[name]
+
+ if elems == nil { // try to find the field in attributes
+ if val, ok := node.findElem(name); ok {
+ elems = []*XMLNode{{Text: val}}
+ }
+ }
+
+ member := r.FieldByName(field.Name)
+ for _, elem := range elems {
+ err := parse(member, elem, field.Tag)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ t := r.Type()
+
+ if tag.Get("flattened") == "" { // look at all item entries
+ mname := "member"
+ if name := tag.Get("locationNameList"); name != "" {
+ mname = name
+ }
+
+		if children, ok := node.Children[mname]; ok {
+			if r.IsNil() {
+				r.Set(reflect.MakeSlice(t, len(children), len(children)))
+			}
+
+			for i, c := range children {
+ err := parse(r.Index(i), c, "")
+ if err != nil {
+ return err
+ }
+ }
+ }
+ } else { // flattened list means this is a single element
+ if r.IsNil() {
+ r.Set(reflect.MakeSlice(t, 0, 0))
+ }
+
+ childR := reflect.Zero(t.Elem())
+ r.Set(reflect.Append(r, childR))
+ err := parse(r.Index(r.Len()-1), node, "")
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ if r.IsNil() {
+ r.Set(reflect.MakeMap(r.Type()))
+ }
+
+ if tag.Get("flattened") == "" { // look at all child entries
+ for _, entry := range node.Children["entry"] {
+ parseMapEntry(r, entry, tag)
+ }
+ } else { // this element is itself an entry
+ parseMapEntry(r, node, tag)
+ }
+
+ return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ kname, vname := "key", "value"
+ if n := tag.Get("locationNameKey"); n != "" {
+ kname = n
+ }
+ if n := tag.Get("locationNameValue"); n != "" {
+ vname = n
+ }
+
+ keys, ok := node.Children[kname]
+ values := node.Children[vname]
+ if ok {
+ for i, key := range keys {
+ keyR := reflect.ValueOf(key.Text)
+ value := values[i]
+ valueR := reflect.New(r.Type().Elem()).Elem()
+
+ parse(valueR, value, "")
+ r.SetMapIndex(keyR, valueR)
+ }
+ }
+ return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// An error is returned if the deserialization fails due to invalid type conversion,
+// or an unsupported interface type.
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ switch r.Interface().(type) {
+ case *string:
+ r.Set(reflect.ValueOf(&node.Text))
+ return nil
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(b))
+ case *bool:
+ v, err := strconv.ParseBool(node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *int64:
+ v, err := strconv.ParseInt(node.Text, 10, 64)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *float64:
+ v, err := strconv.ParseFloat(node.Text, 64)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ t, err := time.Parse(ISO8601UTC, node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&t))
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
+ }
+ return nil
+}
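+
+// Example: a minimal sketch of UnmarshalXML with a hypothetical wrapped result
+// (the struct, tags, and XML are placeholders):
+//
+//    type describeOutput struct {
+//        Name *string `locationName:"Name" type:"string"`
+//    }
+//    body := "<DescribeResponse><DescribeResult><Name>test</Name></DescribeResult></DescribeResponse>"
+//    out := &describeOutput{}
+//    _ = UnmarshalXML(out, xml.NewDecoder(strings.NewReader(body)), "DescribeResult")
+//    // *out.Name == "test"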
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
new file mode 100644
index 00000000..3e970b62
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -0,0 +1,147 @@
+package xmlutil
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "sort"
+)
+
+// An XMLNode contains the values to be encoded or decoded.
+type XMLNode struct {
+ Name xml.Name `json:",omitempty"`
+ Children map[string][]*XMLNode `json:",omitempty"`
+ Text string `json:",omitempty"`
+ Attr []xml.Attr `json:",omitempty"`
+
+ namespaces map[string]string
+ parent *XMLNode
+}
+
+// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
+func NewXMLElement(name xml.Name) *XMLNode {
+ return &XMLNode{
+ Name: name,
+ Children: map[string][]*XMLNode{},
+ Attr: []xml.Attr{},
+ }
+}
+
+// AddChild adds child to the XMLNode.
+func (n *XMLNode) AddChild(child *XMLNode) {
+ if _, ok := n.Children[child.Name.Local]; !ok {
+ n.Children[child.Name.Local] = []*XMLNode{}
+ }
+ n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
+}
+
+// XMLToStruct converts an xml.Decoder stream to an XMLNode with nested values.
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
+ out := &XMLNode{}
+ for {
+ tok, err := d.Token()
+ if err != nil {
+ if err == io.EOF {
+ break
+ } else {
+ return out, err
+ }
+ }
+
+ if tok == nil {
+ break
+ }
+
+ switch typed := tok.(type) {
+ case xml.CharData:
+ out.Text = string(typed.Copy())
+ case xml.StartElement:
+ el := typed.Copy()
+ out.Attr = el.Attr
+ if out.Children == nil {
+ out.Children = map[string][]*XMLNode{}
+ }
+
+ name := typed.Name.Local
+ slice := out.Children[name]
+ if slice == nil {
+ slice = []*XMLNode{}
+ }
+ node, e := XMLToStruct(d, &el)
+ out.findNamespaces()
+ if e != nil {
+ return out, e
+ }
+ node.Name = typed.Name
+ node.findNamespaces()
+			// Save into a temp variable, simply because out gets squashed during
+			// loop iterations.
+			tempOut := *out
+			node.parent = &tempOut
+ slice = append(slice, node)
+ out.Children[name] = slice
+ case xml.EndElement:
+ if s != nil && s.Name.Local == typed.Name.Local { // matching end token
+ return out, nil
+ }
+ out = &XMLNode{}
+ }
+ }
+ return out, nil
+}
+
+func (n *XMLNode) findNamespaces() {
+ ns := map[string]string{}
+ for _, a := range n.Attr {
+ if a.Name.Space == "xmlns" {
+ ns[a.Value] = a.Name.Local
+ }
+ }
+
+ n.namespaces = ns
+}
+
+func (n *XMLNode) findElem(name string) (string, bool) {
+ for node := n; node != nil; node = node.parent {
+ for _, a := range node.Attr {
+ namespace := a.Name.Space
+ if v, ok := node.namespaces[namespace]; ok {
+ namespace = v
+ }
+ if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) {
+ return a.Value, true
+ }
+ }
+ }
+ return "", false
+}
+
+// StructToXML writes an XMLNode to an xml.Encoder as tokens.
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
+ e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
+
+ if node.Text != "" {
+ e.EncodeToken(xml.CharData([]byte(node.Text)))
+ } else if sorted {
+ sortedNames := []string{}
+ for k := range node.Children {
+ sortedNames = append(sortedNames, k)
+ }
+ sort.Strings(sortedNames)
+
+ for _, k := range sortedNames {
+ for _, v := range node.Children[k] {
+ StructToXML(e, v, sorted)
+ }
+ }
+ } else {
+ for _, c := range node.Children {
+ for _, v := range c {
+ StructToXML(e, v, sorted)
+ }
+ }
+ }
+
+ e.EncodeToken(xml.EndElement{Name: node.Name})
+ return e.Flush()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
new file mode 100644
index 00000000..52ac02ca
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -0,0 +1,19245 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/restxml"
+)
+
+const opAbortMultipartUpload = "AbortMultipartUpload"
+
+// AbortMultipartUploadRequest generates a "aws/request.Request" representing the
+// client's request for the AbortMultipartUpload operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See AbortMultipartUpload for usage and error information.
+//
+// Creating a request object using this method is recommended when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the AbortMultipartUpload method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the AbortMultipartUploadRequest method.
+// req, resp := client.AbortMultipartUploadRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
+func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) {
+ op := &request.Operation{
+ Name: opAbortMultipartUpload,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &AbortMultipartUploadInput{}
+ }
+
+ output = &AbortMultipartUploadOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AbortMultipartUpload API operation for Amazon Simple Storage Service.
+//
+// Aborts a multipart upload.
+//
+// To verify that all parts have been removed, so you don't get charged for
+// the part storage, you should call the List Parts operation and ensure the
+// parts list is empty.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation AbortMultipartUpload for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchUpload "NoSuchUpload"
+// The specified multipart upload does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
+func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) {
+ req, out := c.AbortMultipartUploadRequest(input)
+ return out, req.Send()
+}
+
+// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AbortMultipartUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) {
+ req, out := c.AbortMultipartUploadRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCompleteMultipartUpload = "CompleteMultipartUpload"
+
+// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the
+// client's request for the CompleteMultipartUpload operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See CompleteMultipartUpload for usage and error information.
+//
+// Creating a request object using this method is recommended when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CompleteMultipartUpload method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CompleteMultipartUploadRequest method.
+// req, resp := client.CompleteMultipartUploadRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
+func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) {
+ op := &request.Operation{
+ Name: opCompleteMultipartUpload,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &CompleteMultipartUploadInput{}
+ }
+
+ output = &CompleteMultipartUploadOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CompleteMultipartUpload API operation for Amazon Simple Storage Service.
+//
+// Completes a multipart upload by assembling previously uploaded parts.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CompleteMultipartUpload for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
+func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) {
+ req, out := c.CompleteMultipartUploadRequest(input)
+ return out, req.Send()
+}
+
+// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CompleteMultipartUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) {
+ req, out := c.CompleteMultipartUploadRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
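+
+// Illustrative sketch: assembling the CompletedPart list collected while
+// uploading and completing the upload. All identifiers are placeholders and
+// client is assumed to be an *s3.S3.
+//
+//    parts := []*s3.CompletedPart{
+//        {ETag: aws.String("etag-1"), PartNumber: aws.Int64(1)},
+//        {ETag: aws.String("etag-2"), PartNumber: aws.Int64(2)},
+//    }
+//    _, err := client.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//        Bucket:          aws.String("my-bucket"),
+//        Key:             aws.String("my-key"),
+//        UploadId:        aws.String("example-upload-id"),
+//        MultipartUpload: &s3.CompletedMultipartUpload{Parts: parts},
+//    })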
+
+const opCopyObject = "CopyObject"
+
+// CopyObjectRequest generates a "aws/request.Request" representing the
+// client's request for the CopyObject operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See CopyObject for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the CopyObject method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CopyObjectRequest method.
+// req, resp := client.CopyObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
+func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) {
+ op := &request.Operation{
+ Name: opCopyObject,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &CopyObjectInput{}
+ }
+
+ output = &CopyObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CopyObject API operation for Amazon Simple Storage Service.
+//
+// Creates a copy of an object that is already stored in Amazon S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CopyObject for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError"
+// The source object of the COPY operation is not in the active tier and is
+// only stored in Amazon Glacier.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
+func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) {
+ req, out := c.CopyObjectRequest(input)
+ return out, req.Send()
+}
+
+// CopyObjectWithContext is the same as CopyObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CopyObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) {
+ req, out := c.CopyObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
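+
+// Illustrative sketch: copying an object between buckets. CopySource takes
+// the URL-encoded "source-bucket/source-key" form; all names below are
+// placeholders and client is assumed to be an *s3.S3.
+//
+//    _, err := client.CopyObject(&s3.CopyObjectInput{
+//        Bucket:     aws.String("destination-bucket"),
+//        Key:        aws.String("destination-key"),
+//        CopySource: aws.String("source-bucket/source-key"),
+//    })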
+
+const opCreateBucket = "CreateBucket"
+
+// CreateBucketRequest generates a "aws/request.Request" representing the
+// client's request for the CreateBucket operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See CreateBucket for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the CreateBucket method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CreateBucketRequest method.
+// req, resp := client.CreateBucketRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
+func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) {
+ op := &request.Operation{
+ Name: opCreateBucket,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}",
+ }
+
+ if input == nil {
+ input = &CreateBucketInput{}
+ }
+
+ output = &CreateBucketOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateBucket API operation for Amazon Simple Storage Service.
+//
+// Creates a new bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CreateBucket for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeBucketAlreadyExists "BucketAlreadyExists"
+// The requested bucket name is not available. The bucket namespace is shared
+// by all users of the system. Please select a different name and try again.
+//
+// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou"
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
+func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
+ req, out := c.CreateBucketRequest(input)
+ return out, req.Send()
+}
+
+// CreateBucketWithContext is the same as CreateBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) {
+ req, out := c.CreateBucketRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
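+
+// Illustrative sketch of the awserr type-assertion pattern described above,
+// applied to CreateBucket. The bucket name is a placeholder and client is
+// assumed to be an *s3.S3.
+//
+//    _, err := client.CreateBucket(&s3.CreateBucketInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok {
+//        switch aerr.Code() {
+//        case s3.ErrCodeBucketAlreadyOwnedByYou:
+//            // Often safe to treat as success in idempotent setup code.
+//        case s3.ErrCodeBucketAlreadyExists:
+//            // The name is taken by another account; choose another name.
+//        }
+//    }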
+
+const opCreateMultipartUpload = "CreateMultipartUpload"
+
+// CreateMultipartUploadRequest generates a "aws/request.Request" representing the
+// client's request for the CreateMultipartUpload operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See CreateMultipartUpload for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the CreateMultipartUpload method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CreateMultipartUploadRequest method.
+// req, resp := client.CreateMultipartUploadRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
+func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) {
+ op := &request.Operation{
+ Name: opCreateMultipartUpload,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?uploads",
+ }
+
+ if input == nil {
+ input = &CreateMultipartUploadInput{}
+ }
+
+ output = &CreateMultipartUploadOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateMultipartUpload API operation for Amazon Simple Storage Service.
+//
+// Initiates a multipart upload and returns an upload ID.
+//
+// Note: After you initiate a multipart upload and upload one or more parts,
+// you must either complete or abort the multipart upload in order to stop
+// being charged for storage of the uploaded parts. Amazon S3 frees the
+// storage used by the parts and stops charging you for it only after you
+// either complete or abort the multipart upload.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CreateMultipartUpload for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
+func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) {
+ req, out := c.CreateMultipartUploadRequest(input)
+ return out, req.Send()
+}
+
+// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateMultipartUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) {
+ req, out := c.CreateMultipartUploadRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
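+
+// Illustrative sketch of the lifecycle described in the note above: initiate
+// the upload, send parts, then complete or abort so that part storage stops
+// accruing charges. Names are placeholders and client is assumed to be an
+// *s3.S3; UploadPart is this API's part-upload operation.
+//
+//    out, err := client.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("my-key"),
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    // ... call UploadPart with out.UploadId for each part ...
+//    // On failure, abort so the already-uploaded parts are freed:
+//    client.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
+//        Bucket:   aws.String("my-bucket"),
+//        Key:      aws.String("my-key"),
+//        UploadId: out.UploadId,
+//    })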
+
+const opDeleteBucket = "DeleteBucket"
+
+// DeleteBucketRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucket operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucket for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the DeleteBucket method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteBucketRequest method.
+// req, resp := client.DeleteBucketRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
+func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucket,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}",
+ }
+
+ if input == nil {
+ input = &DeleteBucketInput{}
+ }
+
+ output = &DeleteBucketOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucket API operation for Amazon Simple Storage Service.
+//
+// Deletes the bucket. All objects (including all object versions and delete
+// markers) in the bucket must be deleted before the bucket itself can be
+// deleted.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucket for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
+func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) {
+ req, out := c.DeleteBucketRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketWithContext is the same as DeleteBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) {
+ req, out := c.DeleteBucketRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
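+
+// Illustrative sketch: deleting a bucket that has already been emptied of all
+// object versions and delete markers. The bucket name is a placeholder, ctx
+// is a non-nil context.Context, and client is assumed to be an *s3.S3.
+//
+//    _, err := client.DeleteBucketWithContext(ctx, &s3.DeleteBucketInput{
+//        Bucket: aws.String("my-bucket"),
+//    })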
+
+const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration"
+
+// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketAnalyticsConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the DeleteBucketAnalyticsConfiguration
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method.
+// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
+func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketAnalyticsConfiguration,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?analytics",
+ }
+
+ if input == nil {
+ input = &DeleteBucketAnalyticsConfigurationInput{}
+ }
+
+ output = &DeleteBucketAnalyticsConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Deletes an analytics configuration for the bucket (specified by the analytics
+// configuration ID).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketAnalyticsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
+func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.DeleteBucketAnalyticsConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.DeleteBucketAnalyticsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
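+
+// Illustrative sketch: the input carries both the bucket name and the
+// analytics configuration ID; the values below are placeholders and client
+// is assumed to be an *s3.S3.
+//
+//    _, err := client.DeleteBucketAnalyticsConfiguration(&s3.DeleteBucketAnalyticsConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        Id:     aws.String("example-analytics-id"),
+//    })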
+
+const opDeleteBucketCors = "DeleteBucketCors"
+
+// DeleteBucketCorsRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketCors operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketCors for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the DeleteBucketCors method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteBucketCorsRequest method.
+// req, resp := client.DeleteBucketCorsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
+func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketCors,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?cors",
+ }
+
+ if input == nil {
+ input = &DeleteBucketCorsInput{}
+ }
+
+ output = &DeleteBucketCorsOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketCors API operation for Amazon Simple Storage Service.
+//
+// Deletes the CORS configuration information set for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketCors for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
+func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) {
+ req, out := c.DeleteBucketCorsRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) {
+ req, out := c.DeleteBucketCorsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration"
+
+// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketInventoryConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the DeleteBucketInventoryConfiguration
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method.
+// req, resp := client.DeleteBucketInventoryConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
+func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketInventoryConfiguration,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+
+ if input == nil {
+ input = &DeleteBucketInventoryConfigurationInput{}
+ }
+
+ output = &DeleteBucketInventoryConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
+//
+// Deletes an inventory configuration (identified by the inventory ID) from
+// the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketInventoryConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
+func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) {
+ req, out := c.DeleteBucketInventoryConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketInventoryConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) {
+ req, out := c.DeleteBucketInventoryConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
+
+// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketLifecycle operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketLifecycle for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the DeleteBucketLifecycle method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteBucketLifecycleRequest method.
+// req, resp := client.DeleteBucketLifecycleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
+func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketLifecycle,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &DeleteBucketLifecycleInput{}
+ }
+
+ output = &DeleteBucketLifecycleOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketLifecycle API operation for Amazon Simple Storage Service.
+//
+// Deletes the lifecycle configuration from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketLifecycle for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
+func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) {
+ req, out := c.DeleteBucketLifecycleRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketLifecycle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) {
+ req, out := c.DeleteBucketLifecycleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration"
+
+// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketMetricsConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the DeleteBucketMetricsConfiguration
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method.
+// req, resp := client.DeleteBucketMetricsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
+func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketMetricsConfiguration,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?metrics",
+ }
+
+ if input == nil {
+ input = &DeleteBucketMetricsConfigurationInput{}
+ }
+
+ output = &DeleteBucketMetricsConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Deletes a metrics configuration (specified by the metrics configuration ID)
+// from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketMetricsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
+func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) {
+ req, out := c.DeleteBucketMetricsConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketMetricsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) {
+ req, out := c.DeleteBucketMetricsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketPolicy = "DeleteBucketPolicy"
+
+// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketPolicy for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the DeleteBucketPolicy method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteBucketPolicyRequest method.
+// req, resp := client.DeleteBucketPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
+func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketPolicy,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?policy",
+ }
+
+ if input == nil {
+ input = &DeleteBucketPolicyInput{}
+ }
+
+ output = &DeleteBucketPolicyOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketPolicy API operation for Amazon Simple Storage Service.
+//
+// Deletes the policy from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketPolicy for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
+func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) {
+ req, out := c.DeleteBucketPolicyRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) {
+ req, out := c.DeleteBucketPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketReplication = "DeleteBucketReplication"
+
+// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketReplication operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketReplication for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the DeleteBucketReplication method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteBucketReplicationRequest method.
+// req, resp := client.DeleteBucketReplicationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
+func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketReplication,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &DeleteBucketReplicationInput{}
+ }
+
+ output = &DeleteBucketReplicationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketReplication API operation for Amazon Simple Storage Service.
+//
+// Deletes the replication configuration from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketReplication for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
+func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
+ req, out := c.DeleteBucketReplicationRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) {
+ req, out := c.DeleteBucketReplicationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketTagging = "DeleteBucketTagging"
+
+// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketTagging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketTagging for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the DeleteBucketTagging method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteBucketTaggingRequest method.
+// req, resp := client.DeleteBucketTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
+func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketTagging,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?tagging",
+ }
+
+ if input == nil {
+ input = &DeleteBucketTaggingInput{}
+ }
+
+ output = &DeleteBucketTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketTagging API operation for Amazon Simple Storage Service.
+//
+// Deletes the tags from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
+func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) {
+ req, out := c.DeleteBucketTaggingRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) {
+ req, out := c.DeleteBucketTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketWebsite = "DeleteBucketWebsite"
+
+// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketWebsite operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketWebsite for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the DeleteBucketWebsite method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteBucketWebsiteRequest method.
+// req, resp := client.DeleteBucketWebsiteRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
+func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketWebsite,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &DeleteBucketWebsiteInput{}
+ }
+
+ output = &DeleteBucketWebsiteOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketWebsite API operation for Amazon Simple Storage Service.
+//
+// This operation removes the website configuration from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketWebsite for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
+func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) {
+ req, out := c.DeleteBucketWebsiteRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) {
+ req, out := c.DeleteBucketWebsiteRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteObject = "DeleteObject"
+
+// DeleteObjectRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteObject operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteObject for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the DeleteObject method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteObjectRequest method.
+// req, resp := client.DeleteObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
+func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) {
+ op := &request.Operation{
+ Name: opDeleteObject,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &DeleteObjectInput{}
+ }
+
+ output = &DeleteObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteObject API operation for Amazon Simple Storage Service.
+//
+// Removes the null version (if there is one) of an object and inserts a delete
+// marker, which becomes the latest version of the object. If there isn't a
+// null version, Amazon S3 does not remove any objects.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObject for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
+func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) {
+ req, out := c.DeleteObjectRequest(input)
+ return out, req.Send()
+}
+
+// DeleteObjectWithContext is the same as DeleteObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) {
+ req, out := c.DeleteObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
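+
+// Illustrative sketch: on a versioned bucket, omitting VersionId inserts a
+// delete marker as described above, while supplying VersionId permanently
+// removes that specific version. Values are placeholders and client is
+// assumed to be an *s3.S3.
+//
+//    _, err := client.DeleteObject(&s3.DeleteObjectInput{
+//        Bucket:    aws.String("my-bucket"),
+//        Key:       aws.String("my-key"),
+//        VersionId: aws.String("example-version-id"),
+//    })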
+
+const opDeleteObjectTagging = "DeleteObjectTagging"
+
+// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteObjectTagging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteObjectTagging for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the DeleteObjectTagging method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteObjectTaggingRequest method.
+// req, resp := client.DeleteObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opDeleteObjectTagging,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &DeleteObjectTaggingInput{}
+ }
+
+ output = &DeleteObjectTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Removes the tag-set from an existing object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObjectTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) {
+ req, out := c.DeleteObjectTaggingRequest(input)
+ return out, req.Send()
+}
+
+// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) {
+ req, out := c.DeleteObjectTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteObjects = "DeleteObjects"
+
+// DeleteObjectsRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteObjects operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteObjects for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the DeleteObjects method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteObjectsRequest method.
+// req, resp := client.DeleteObjectsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
+func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) {
+ op := &request.Operation{
+ Name: opDeleteObjects,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}?delete",
+ }
+
+ if input == nil {
+ input = &DeleteObjectsInput{}
+ }
+
+ output = &DeleteObjectsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteObjects API operation for Amazon Simple Storage Service.
+//
+// This operation enables you to delete multiple objects from a bucket using
+// a single HTTP request. You may specify up to 1000 keys.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObjects for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
+func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) {
+ req, out := c.DeleteObjectsRequest(input)
+ return out, req.Send()
+}
+
+// DeleteObjectsWithContext is the same as DeleteObjects with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObjects for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) {
+ req, out := c.DeleteObjectsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
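+
+// Illustrative sketch: batch-deleting two keys in one request. Quiet mode
+// suppresses per-key success entries in the response. Names are placeholders
+// and client is assumed to be an *s3.S3.
+//
+//    _, err := client.DeleteObjects(&s3.DeleteObjectsInput{
+//        Bucket: aws.String("my-bucket"),
+//        Delete: &s3.Delete{
+//            Objects: []*s3.ObjectIdentifier{
+//                {Key: aws.String("key-one")},
+//                {Key: aws.String("key-two")},
+//            },
+//            Quiet: aws.Bool(true),
+//        },
+//    })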
+
+const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration"
+
+// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketAccelerateConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketAccelerateConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the GetBucketAccelerateConfiguration
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketAccelerateConfigurationRequest method.
+// req, resp := client.GetBucketAccelerateConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
+func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketAccelerateConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?accelerate",
+ }
+
+ if input == nil {
+ input = &GetBucketAccelerateConfigurationInput{}
+ }
+
+ output = &GetBucketAccelerateConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
+//
+// Returns the accelerate configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketAccelerateConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
+func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) {
+ req, out := c.GetBucketAccelerateConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketAccelerateConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) {
+ req, out := c.GetBucketAccelerateConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
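+
+// A minimal sketch of the WithContext variant, assuming a configured *S3
+// client named svc and the standard library context and time packages
+// (a context.Context satisfies aws.Context on Go 1.7+); the bucket name is
+// illustrative.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//    defer cancel()
+//    out, err := svc.GetBucketAccelerateConfigurationWithContext(ctx,
+//        &s3.GetBucketAccelerateConfigurationInput{Bucket: aws.String("my-bucket")})
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Status))
+//    }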
+
+const opGetBucketAcl = "GetBucketAcl"
+
+// GetBucketAclRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketAcl operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketAcl for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketAcl method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketAclRequest method.
+// req, resp := client.GetBucketAclRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
+func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) {
+ op := &request.Operation{
+ Name: opGetBucketAcl,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?acl",
+ }
+
+ if input == nil {
+ input = &GetBucketAclInput{}
+ }
+
+ output = &GetBucketAclOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketAcl API operation for Amazon Simple Storage Service.
+//
+// Gets the access control policy for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketAcl for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
+func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) {
+ req, out := c.GetBucketAclRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketAclWithContext is the same as GetBucketAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) {
+ req, out := c.GetBucketAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
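+
+// A minimal sketch, assuming a configured *S3 client named svc and an
+// illustrative bucket name; the output carries the bucket owner plus one
+// entry per grant.
+//
+//    out, err := svc.GetBucketAcl(&s3.GetBucketAclInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        for _, g := range out.Grants {
+//            fmt.Println(aws.StringValue(g.Permission))
+//        }
+//    }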
+
+const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration"
+
+// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketAnalyticsConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketAnalyticsConfiguration method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method.
+// req, resp := client.GetBucketAnalyticsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration
+func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketAnalyticsConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?analytics",
+ }
+
+ if input == nil {
+ input = &GetBucketAnalyticsConfigurationInput{}
+ }
+
+ output = &GetBucketAnalyticsConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Gets an analytics configuration for the bucket (specified by the analytics
+// configuration ID).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketAnalyticsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration
+func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.GetBucketAnalyticsConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketAnalyticsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.GetBucketAnalyticsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
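+
+// A minimal sketch, assuming a configured *S3 client named svc; note that the
+// analytics configuration ID is required alongside the bucket. The names are
+// illustrative.
+//
+//    out, err := svc.GetBucketAnalyticsConfiguration(&s3.GetBucketAnalyticsConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        Id:     aws.String("report-1"),
+//    })
+//    if err == nil {
+//        fmt.Println(out.AnalyticsConfiguration)
+//    }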
+
+const opGetBucketCors = "GetBucketCors"
+
+// GetBucketCorsRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketCors operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketCors for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketCors method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketCorsRequest method.
+// req, resp := client.GetBucketCorsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors
+func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) {
+ op := &request.Operation{
+ Name: opGetBucketCors,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?cors",
+ }
+
+ if input == nil {
+ input = &GetBucketCorsInput{}
+ }
+
+ output = &GetBucketCorsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketCors API operation for Amazon Simple Storage Service.
+//
+// Returns the CORS configuration for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketCors for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors
+func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) {
+ req, out := c.GetBucketCorsRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketCorsWithContext is the same as GetBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) {
+ req, out := c.GetBucketCorsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration"
+
+// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketInventoryConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketInventoryConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketInventoryConfiguration method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketInventoryConfigurationRequest method.
+// req, resp := client.GetBucketInventoryConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration
+func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketInventoryConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+
+ if input == nil {
+ input = &GetBucketInventoryConfigurationInput{}
+ }
+
+ output = &GetBucketInventoryConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
+//
+// Returns an inventory configuration (identified by the inventory ID) from
+// the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketInventoryConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration
+func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) {
+ req, out := c.GetBucketInventoryConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketInventoryConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) {
+ req, out := c.GetBucketInventoryConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketLifecycle = "GetBucketLifecycle"
+
+// GetBucketLifecycleRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketLifecycle operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketLifecycle for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketLifecycle method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketLifecycleRequest method.
+// req, resp := client.GetBucketLifecycleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
+func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) {
+ if c.Client.Config.Logger != nil {
+ c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated")
+ }
+ op := &request.Operation{
+ Name: opGetBucketLifecycle,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &GetBucketLifecycleInput{}
+ }
+
+ output = &GetBucketLifecycleOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketLifecycle API operation for Amazon Simple Storage Service.
+//
+// Deprecated; see the GetBucketLifecycleConfiguration operation instead.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLifecycle for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
+func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) {
+ req, out := c.GetBucketLifecycleRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLifecycle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) {
+ req, out := c.GetBucketLifecycleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration"
+
+// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketLifecycleConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketLifecycleConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketLifecycleConfiguration method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketLifecycleConfigurationRequest method.
+// req, resp := client.GetBucketLifecycleConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
+func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketLifecycleConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &GetBucketLifecycleConfigurationInput{}
+ }
+
+ output = &GetBucketLifecycleConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
+//
+// Returns the lifecycle configuration information set on the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLifecycleConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
+func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) {
+ req, out := c.GetBucketLifecycleConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLifecycleConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) {
+ req, out := c.GetBucketLifecycleConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
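+
+// A minimal sketch, assuming a configured *S3 client named svc and an
+// illustrative bucket name; prefer this operation over the deprecated
+// GetBucketLifecycle.
+//
+//    out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        for _, r := range out.Rules {
+//            fmt.Println(aws.StringValue(r.ID), aws.StringValue(r.Status))
+//        }
+//    }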
+
+const opGetBucketLocation = "GetBucketLocation"
+
+// GetBucketLocationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketLocation operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketLocation for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketLocation method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketLocationRequest method.
+// req, resp := client.GetBucketLocationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation
+func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketLocation,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?location",
+ }
+
+ if input == nil {
+ input = &GetBucketLocationInput{}
+ }
+
+ output = &GetBucketLocationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketLocation API operation for Amazon Simple Storage Service.
+//
+// Returns the region the bucket resides in.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLocation for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation
+func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) {
+ req, out := c.GetBucketLocationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLocation for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) {
+ req, out := c.GetBucketLocationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
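+
+// A minimal sketch, assuming a configured *S3 client named svc and an
+// illustrative bucket name. S3 reports an empty LocationConstraint for
+// buckets in us-east-1.
+//
+//    out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        region := aws.StringValue(out.LocationConstraint)
+//        if region == "" {
+//            region = "us-east-1"
+//        }
+//        fmt.Println(region)
+//    }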
+
+const opGetBucketLogging = "GetBucketLogging"
+
+// GetBucketLoggingRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketLogging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketLogging for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketLogging method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketLoggingRequest method.
+// req, resp := client.GetBucketLoggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging
+func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) {
+ op := &request.Operation{
+ Name: opGetBucketLogging,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?logging",
+ }
+
+ if input == nil {
+ input = &GetBucketLoggingInput{}
+ }
+
+ output = &GetBucketLoggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketLogging API operation for Amazon Simple Storage Service.
+//
+// Returns the logging status of a bucket and the permissions users have to
+// view and modify that status. To use GET, you must be the bucket owner.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLogging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging
+func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) {
+ req, out := c.GetBucketLoggingRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLogging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) {
+ req, out := c.GetBucketLoggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration"
+
+// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketMetricsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketMetricsConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketMetricsConfiguration method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketMetricsConfigurationRequest method.
+// req, resp := client.GetBucketMetricsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration
+func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketMetricsConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?metrics",
+ }
+
+ if input == nil {
+ input = &GetBucketMetricsConfigurationInput{}
+ }
+
+ output = &GetBucketMetricsConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Gets a metrics configuration (specified by the metrics configuration ID)
+// from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketMetricsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration
+func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) {
+ req, out := c.GetBucketMetricsConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketMetricsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) {
+ req, out := c.GetBucketMetricsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketNotification = "GetBucketNotification"
+
+// GetBucketNotificationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketNotification operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketNotification for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketNotification method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketNotificationRequest method.
+// req, resp := client.GetBucketNotificationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
+func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) {
+ if c.Client.Config.Logger != nil {
+ c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated")
+ }
+ op := &request.Operation{
+ Name: opGetBucketNotification,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &GetBucketNotificationConfigurationRequest{}
+ }
+
+ output = &NotificationConfigurationDeprecated{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketNotification API operation for Amazon Simple Storage Service.
+//
+// Deprecated; see the GetBucketNotificationConfiguration operation instead.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketNotification for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
+func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) {
+ req, out := c.GetBucketNotificationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketNotification for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) {
+ req, out := c.GetBucketNotificationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration"
+
+// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketNotificationConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketNotificationConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketNotificationConfiguration method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketNotificationConfigurationRequest method.
+// req, resp := client.GetBucketNotificationConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration
+func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) {
+ op := &request.Operation{
+ Name: opGetBucketNotificationConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &GetBucketNotificationConfigurationRequest{}
+ }
+
+ output = &NotificationConfiguration{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
+//
+// Returns the notification configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketNotificationConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration
+func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) {
+ req, out := c.GetBucketNotificationConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketNotificationConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) {
+ req, out := c.GetBucketNotificationConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
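+
+// A minimal sketch, assuming a configured *S3 client named svc and an
+// illustrative bucket name; the returned configuration groups notification
+// targets by type.
+//
+//    out, err := svc.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        fmt.Println(len(out.TopicConfigurations), len(out.QueueConfigurations),
+//            len(out.LambdaFunctionConfigurations))
+//    }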
+
+const opGetBucketPolicy = "GetBucketPolicy"
+
+// GetBucketPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketPolicy for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketPolicy method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketPolicyRequest method.
+// req, resp := client.GetBucketPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy
+func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) {
+ op := &request.Operation{
+ Name: opGetBucketPolicy,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?policy",
+ }
+
+ if input == nil {
+ input = &GetBucketPolicyInput{}
+ }
+
+ output = &GetBucketPolicyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketPolicy API operation for Amazon Simple Storage Service.
+//
+// Returns the policy of a specified bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketPolicy for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy
+func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) {
+ req, out := c.GetBucketPolicyRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) {
+ req, out := c.GetBucketPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
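+
+// A minimal sketch, assuming a configured *S3 client named svc and an
+// illustrative bucket name; the policy comes back as a raw JSON document in
+// a string field.
+//
+//    out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Policy))
+//    }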
+
+const opGetBucketReplication = "GetBucketReplication"
+
+// GetBucketReplicationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketReplication operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketReplication for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketReplication method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketReplicationRequest method.
+// req, resp := client.GetBucketReplicationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
+func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketReplication,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &GetBucketReplicationInput{}
+ }
+
+ output = &GetBucketReplicationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketReplication API operation for Amazon Simple Storage Service.
+//
+// Returns the replication configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketReplication for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
+func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
+ req, out := c.GetBucketReplicationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) {
+ req, out := c.GetBucketReplicationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketRequestPayment = "GetBucketRequestPayment"
+
+// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketRequestPayment operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketRequestPayment for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketRequestPayment method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketRequestPaymentRequest method.
+// req, resp := client.GetBucketRequestPaymentRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment
+func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) {
+ op := &request.Operation{
+ Name: opGetBucketRequestPayment,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?requestPayment",
+ }
+
+ if input == nil {
+ input = &GetBucketRequestPaymentInput{}
+ }
+
+ output = &GetBucketRequestPaymentOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketRequestPayment API operation for Amazon Simple Storage Service.
+//
+// Returns the request payment configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketRequestPayment for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment
+func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) {
+ req, out := c.GetBucketRequestPaymentRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketRequestPayment for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) {
+ req, out := c.GetBucketRequestPaymentRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketTagging = "GetBucketTagging"
+
+// GetBucketTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketTagging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketTagging for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketTagging method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketTaggingRequest method.
+// req, resp := client.GetBucketTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging
+func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) {
+ op := &request.Operation{
+ Name: opGetBucketTagging,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?tagging",
+ }
+
+ if input == nil {
+ input = &GetBucketTaggingInput{}
+ }
+
+ output = &GetBucketTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketTagging API operation for Amazon Simple Storage Service.
+//
+// Returns the tag set associated with the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging
+func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) {
+ req, out := c.GetBucketTaggingRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) {
+ req, out := c.GetBucketTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
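+
+// A minimal sketch, assuming a configured *S3 client named svc and an
+// illustrative bucket name.
+//
+//    out, err := svc.GetBucketTagging(&s3.GetBucketTaggingInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        for _, t := range out.TagSet {
+//            fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
+//        }
+//    }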
+
+const opGetBucketVersioning = "GetBucketVersioning"
+
+// GetBucketVersioningRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketVersioning operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketVersioning for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketVersioning method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketVersioningRequest method.
+// req, resp := client.GetBucketVersioningRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
+func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) {
+ op := &request.Operation{
+ Name: opGetBucketVersioning,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?versioning",
+ }
+
+ if input == nil {
+ input = &GetBucketVersioningInput{}
+ }
+
+ output = &GetBucketVersioningOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketVersioning API operation for Amazon Simple Storage Service.
+//
+// Returns the versioning state of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketVersioning for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
+func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) {
+ req, out := c.GetBucketVersioningRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketVersioning for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) {
+ req, out := c.GetBucketVersioningRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
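+
+// A minimal sketch, assuming a configured *S3 client named svc and an
+// illustrative bucket name. Status is "Enabled" or "Suspended", and is left
+// unset for buckets on which versioning has never been configured.
+//
+//    out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil && out.Status != nil {
+//        fmt.Println(aws.StringValue(out.Status))
+//    }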
+
+const opGetBucketWebsite = "GetBucketWebsite"
+
+// GetBucketWebsiteRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketWebsite operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketWebsite for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketWebsite method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketWebsiteRequest method.
+// req, resp := client.GetBucketWebsiteRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
+func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) {
+ op := &request.Operation{
+ Name: opGetBucketWebsite,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &GetBucketWebsiteInput{}
+ }
+
+ output = &GetBucketWebsiteOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketWebsite API operation for Amazon Simple Storage Service.
+//
+// Returns the website configuration for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketWebsite for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
+func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) {
+ req, out := c.GetBucketWebsiteRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) {
+ req, out := c.GetBucketWebsiteRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetObject = "GetObject"
+
+// GetObjectRequest generates a "aws/request.Request" representing the
+// client's request for the GetObject operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetObject for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the GetObject method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetObjectRequest method.
+// req, resp := client.GetObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
+func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) {
+ op := &request.Operation{
+ Name: opGetObject,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &GetObjectInput{}
+ }
+
+ output = &GetObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObject API operation for Amazon Simple Storage Service.
+//
+// Retrieves objects from Amazon S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObject for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchKey "NoSuchKey"
+// The specified key does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
+func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) {
+ req, out := c.GetObjectRequest(input)
+ return out, req.Send()
+}
+
+// GetObjectWithContext is the same as GetObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) {
+ req, out := c.GetObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
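+// A minimal sketch of fetching an object and distinguishing a missing key,
+// assuming an existing *s3.S3 client named svc; the bucket and key are
+// illustrative placeholders.
+//
+// out, err := svc.GetObject(&s3.GetObjectInput{
+// Bucket: aws.String("my-bucket"), // placeholder
+// Key: aws.String("path/to/key"), // placeholder
+// })
+// if err != nil {
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
+// // the key does not exist
+// }
+// return err
+// }
+// defer out.Body.Close()
+// data, err := ioutil.ReadAll(out.Body) // out.Body is an io.ReadCloser
+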
+const opGetObjectAcl = "GetObjectAcl"
+
+// GetObjectAclRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectAcl operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetObjectAcl for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the GetObjectAcl method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetObjectAclRequest method.
+// req, resp := client.GetObjectAclRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
+func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) {
+ op := &request.Operation{
+ Name: opGetObjectAcl,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?acl",
+ }
+
+ if input == nil {
+ input = &GetObjectAclInput{}
+ }
+
+ output = &GetObjectAclOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObjectAcl API operation for Amazon Simple Storage Service.
+//
+// Returns the access control list (ACL) of an object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectAcl for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchKey "NoSuchKey"
+// The specified key does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
+func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) {
+ req, out := c.GetObjectAclRequest(input)
+ return out, req.Send()
+}
+
+// GetObjectAclWithContext is the same as GetObjectAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) {
+ req, out := c.GetObjectAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
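+// A minimal sketch of reading an object's grants, assuming an existing *s3.S3
+// client named svc; the bucket and key are illustrative placeholders.
+//
+// out, err := svc.GetObjectAcl(&s3.GetObjectAclInput{
+// Bucket: aws.String("my-bucket"), // placeholder
+// Key: aws.String("path/to/key"), // placeholder
+// })
+// if err != nil {
+// return err
+// }
+// for _, g := range out.Grants {
+// fmt.Println(aws.StringValue(g.Permission))
+// }
+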
+const opGetObjectTagging = "GetObjectTagging"
+
+// GetObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectTagging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetObjectTagging for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the GetObjectTagging
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetObjectTaggingRequest method.
+// req, resp := client.GetObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
+func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opGetObjectTagging,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &GetObjectTaggingInput{}
+ }
+
+ output = &GetObjectTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Returns the tag-set of an object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
+func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) {
+ req, out := c.GetObjectTaggingRequest(input)
+ return out, req.Send()
+}
+
+// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) {
+ req, out := c.GetObjectTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
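+// A minimal sketch of reading an object's tag set, assuming an existing
+// *s3.S3 client named svc; the bucket and key are illustrative placeholders.
+//
+// out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
+// Bucket: aws.String("my-bucket"), // placeholder
+// Key: aws.String("path/to/key"), // placeholder
+// })
+// if err != nil {
+// return err
+// }
+// for _, t := range out.TagSet {
+// fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
+// }
+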
+const opGetObjectTorrent = "GetObjectTorrent"
+
+// GetObjectTorrentRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectTorrent operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetObjectTorrent for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the GetObjectTorrent
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetObjectTorrentRequest method.
+// req, resp := client.GetObjectTorrentRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent
+func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) {
+ op := &request.Operation{
+ Name: opGetObjectTorrent,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?torrent",
+ }
+
+ if input == nil {
+ input = &GetObjectTorrentInput{}
+ }
+
+ output = &GetObjectTorrentOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObjectTorrent API operation for Amazon Simple Storage Service.
+//
+// Returns torrent files from a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectTorrent for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent
+func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) {
+ req, out := c.GetObjectTorrentRequest(input)
+ return out, req.Send()
+}
+
+// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectTorrent for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) {
+ req, out := c.GetObjectTorrentRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opHeadBucket = "HeadBucket"
+
+// HeadBucketRequest generates a "aws/request.Request" representing the
+// client's request for the HeadBucket operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See HeadBucket for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the HeadBucket method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the HeadBucketRequest method.
+// req, resp := client.HeadBucketRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket
+func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) {
+ op := &request.Operation{
+ Name: opHeadBucket,
+ HTTPMethod: "HEAD",
+ HTTPPath: "/{Bucket}",
+ }
+
+ if input == nil {
+ input = &HeadBucketInput{}
+ }
+
+ output = &HeadBucketOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// HeadBucket API operation for Amazon Simple Storage Service.
+//
+// This operation is useful for determining whether a bucket exists and whether
+// you have permission to access it.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation HeadBucket for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchBucket "NoSuchBucket"
+// The specified bucket does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket
+func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) {
+ req, out := c.HeadBucketRequest(input)
+ return out, req.Send()
+}
+
+// HeadBucketWithContext is the same as HeadBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See HeadBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) {
+ req, out := c.HeadBucketRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
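+// A minimal sketch of probing for a bucket, assuming an existing *s3.S3
+// client named svc; the bucket name is an illustrative placeholder. Because
+// HEAD responses carry no error body, checking the HTTP status through
+// awserr.RequestFailure is one way to detect a missing bucket.
+//
+// _, err := svc.HeadBucket(&s3.HeadBucketInput{
+// Bucket: aws.String("my-bucket"), // placeholder
+// })
+// if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 404 {
+// // the bucket does not exist
+// } else if err != nil {
+// // some other failure, for example 403 when access is denied
+// }
+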
+const opHeadObject = "HeadObject"
+
+// HeadObjectRequest generates a "aws/request.Request" representing the
+// client's request for the HeadObject operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See HeadObject for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the HeadObject method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the HeadObjectRequest method.
+// req, resp := client.HeadObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
+func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) {
+ op := &request.Operation{
+ Name: opHeadObject,
+ HTTPMethod: "HEAD",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &HeadObjectInput{}
+ }
+
+ output = &HeadObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// HeadObject API operation for Amazon Simple Storage Service.
+//
+// The HEAD operation retrieves metadata from an object without returning the
+// object itself. This operation is useful if you're only interested in an object's
+// metadata. To use HEAD, you must have READ access to the object.
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses
+// for more information on returned errors.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation HeadObject for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
+func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) {
+ req, out := c.HeadObjectRequest(input)
+ return out, req.Send()
+}
+
+// HeadObjectWithContext is the same as HeadObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See HeadObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) {
+ req, out := c.HeadObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
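+// A minimal sketch of reading object metadata without downloading the body,
+// assuming an existing *s3.S3 client named svc; the bucket and key are
+// illustrative placeholders.
+//
+// out, err := svc.HeadObject(&s3.HeadObjectInput{
+// Bucket: aws.String("my-bucket"), // placeholder
+// Key: aws.String("path/to/key"), // placeholder
+// })
+// if err != nil {
+// return err
+// }
+// fmt.Println(aws.Int64Value(out.ContentLength), aws.TimeValue(out.LastModified))
+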
+const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations"
+
+// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListBucketAnalyticsConfigurations for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the
+// ListBucketAnalyticsConfigurations method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method.
+// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations
+func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) {
+ op := &request.Operation{
+ Name: opListBucketAnalyticsConfigurations,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?analytics",
+ }
+
+ if input == nil {
+ input = &ListBucketAnalyticsConfigurationsInput{}
+ }
+
+ output = &ListBucketAnalyticsConfigurationsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service.
+//
+// Lists the analytics configurations for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBucketAnalyticsConfigurations for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations
+func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) {
+ req, out := c.ListBucketAnalyticsConfigurationsRequest(input)
+ return out, req.Send()
+}
+
+// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBucketAnalyticsConfigurations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) {
+ req, out := c.ListBucketAnalyticsConfigurationsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
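+// This operation has no Pages helper, so truncated listings are followed by
+// hand. A minimal sketch, assuming an existing *s3.S3 client named svc and
+// the ContinuationToken fields on the input and output structs; the bucket
+// name is an illustrative placeholder.
+//
+// in := &s3.ListBucketAnalyticsConfigurationsInput{
+// Bucket: aws.String("my-bucket"), // placeholder
+// }
+// for {
+// out, err := svc.ListBucketAnalyticsConfigurations(in)
+// if err != nil {
+// return err
+// }
+// for _, cfg := range out.AnalyticsConfigurationList {
+// fmt.Println(aws.StringValue(cfg.Id))
+// }
+// if !aws.BoolValue(out.IsTruncated) {
+// break
+// }
+// in.ContinuationToken = out.NextContinuationToken
+// }
+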
+const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations"
+
+// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketInventoryConfigurations operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListBucketInventoryConfigurations for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the
+// ListBucketInventoryConfigurations method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListBucketInventoryConfigurationsRequest method.
+// req, resp := client.ListBucketInventoryConfigurationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
+func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) {
+ op := &request.Operation{
+ Name: opListBucketInventoryConfigurations,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+
+ if input == nil {
+ input = &ListBucketInventoryConfigurationsInput{}
+ }
+
+ output = &ListBucketInventoryConfigurationsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service.
+//
+// Returns a list of inventory configurations for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBucketInventoryConfigurations for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
+func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) {
+ req, out := c.ListBucketInventoryConfigurationsRequest(input)
+ return out, req.Send()
+}
+
+// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBucketInventoryConfigurations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) {
+ req, out := c.ListBucketInventoryConfigurationsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations"
+
+// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketMetricsConfigurations operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListBucketMetricsConfigurations for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the
+// ListBucketMetricsConfigurations method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListBucketMetricsConfigurationsRequest method.
+// req, resp := client.ListBucketMetricsConfigurationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations
+func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) {
+ op := &request.Operation{
+ Name: opListBucketMetricsConfigurations,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?metrics",
+ }
+
+ if input == nil {
+ input = &ListBucketMetricsConfigurationsInput{}
+ }
+
+ output = &ListBucketMetricsConfigurationsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service.
+//
+// Lists the metrics configurations for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBucketMetricsConfigurations for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations
+func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) {
+ req, out := c.ListBucketMetricsConfigurationsRequest(input)
+ return out, req.Send()
+}
+
+// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBucketMetricsConfigurations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) {
+ req, out := c.ListBucketMetricsConfigurationsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opListBuckets = "ListBuckets"
+
+// ListBucketsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBuckets operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListBuckets for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the ListBuckets method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListBucketsRequest method.
+// req, resp := client.ListBucketsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
+func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) {
+ op := &request.Operation{
+ Name: opListBuckets,
+ HTTPMethod: "GET",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListBucketsInput{}
+ }
+
+ output = &ListBucketsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListBuckets API operation for Amazon Simple Storage Service.
+//
+// Returns a list of all buckets owned by the authenticated sender of the request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBuckets for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
+func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) {
+ req, out := c.ListBucketsRequest(input)
+ return out, req.Send()
+}
+
+// ListBucketsWithContext is the same as ListBuckets with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBuckets for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) {
+ req, out := c.ListBucketsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
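+// A minimal sketch of listing all buckets, assuming an existing *s3.S3 client
+// named svc; ListBucketsInput carries no parameters.
+//
+// out, err := svc.ListBuckets(&s3.ListBucketsInput{})
+// if err != nil {
+// return err
+// }
+// for _, b := range out.Buckets {
+// fmt.Println(aws.StringValue(b.Name))
+// }
+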
+const opListMultipartUploads = "ListMultipartUploads"
+
+// ListMultipartUploadsRequest generates a "aws/request.Request" representing the
+// client's request for the ListMultipartUploads operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListMultipartUploads for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the ListMultipartUploads
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListMultipartUploadsRequest method.
+// req, resp := client.ListMultipartUploadsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
+func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) {
+ op := &request.Operation{
+ Name: opListMultipartUploads,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?uploads",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"KeyMarker", "UploadIdMarker"},
+ OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"},
+ LimitToken: "MaxUploads",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListMultipartUploadsInput{}
+ }
+
+ output = &ListMultipartUploadsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListMultipartUploads API operation for Amazon Simple Storage Service.
+//
+// This operation lists in-progress multipart uploads.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListMultipartUploads for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
+func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) {
+ req, out := c.ListMultipartUploadsRequest(input)
+ return out, req.Send()
+}
+
+// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListMultipartUploads for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) {
+ req, out := c.ListMultipartUploadsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListMultipartUploads method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListMultipartUploads operation.
+// pageNum := 0
+// err := client.ListMultipartUploadsPages(params,
+// func(page *ListMultipartUploadsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error {
+ return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListMultipartUploadsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListMultipartUploadsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage())
+ }
+ return p.Err()
+}
+
+const opListObjectVersions = "ListObjectVersions"
+
+// ListObjectVersionsRequest generates a "aws/request.Request" representing the
+// client's request for the ListObjectVersions operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListObjectVersions for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the ListObjectVersions
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListObjectVersionsRequest method.
+// req, resp := client.ListObjectVersionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
+func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) {
+ op := &request.Operation{
+ Name: opListObjectVersions,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?versions",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"KeyMarker", "VersionIdMarker"},
+ OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"},
+ LimitToken: "MaxKeys",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListObjectVersionsInput{}
+ }
+
+ output = &ListObjectVersionsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListObjectVersions API operation for Amazon Simple Storage Service.
+//
+// Returns metadata about all of the versions of objects in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListObjectVersions for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
+func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) {
+ req, out := c.ListObjectVersionsRequest(input)
+ return out, req.Send()
+}
+
+// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjectVersions for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) {
+ req, out := c.ListObjectVersionsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListObjectVersions method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListObjectVersions operation.
+// pageNum := 0
+// err := client.ListObjectVersionsPages(params,
+// func(page *ListObjectVersionsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error {
+ return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListObjectVersionsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListObjectVersionsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage())
+ }
+ return p.Err()
+}
+
+const opListObjects = "ListObjects"
+
+// ListObjectsRequest generates a "aws/request.Request" representing the
+// client's request for the ListObjects operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListObjects for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the ListObjects method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListObjectsRequest method.
+// req, resp := client.ListObjectsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
+func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) {
+ op := &request.Operation{
+ Name: opListObjects,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"NextMarker || Contents[-1].Key"},
+ LimitToken: "MaxKeys",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListObjectsInput{}
+ }
+
+ output = &ListObjectsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListObjects API operation for Amazon Simple Storage Service.
+//
+// Returns some or all (up to 1000) of the objects in a bucket. You can use
+// the request parameters as selection criteria to return a subset of the objects
+// in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListObjects for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchBucket "NoSuchBucket"
+// The specified bucket does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
+func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) {
+ req, out := c.ListObjectsRequest(input)
+ return out, req.Send()
+}
+
+// ListObjectsWithContext is the same as ListObjects with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjects for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) {
+ req, out := c.ListObjectsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListObjectsPages iterates over the pages of a ListObjects operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListObjects method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListObjects operation.
+// pageNum := 0
+// err := client.ListObjectsPages(params,
+// func(page *ListObjectsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error {
+ return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectsPagesWithContext same as ListObjectsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListObjectsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListObjectsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage())
+ }
+ return p.Err()
+}
+
+const opListObjectsV2 = "ListObjectsV2"
+
+// ListObjectsV2Request generates a "aws/request.Request" representing the
+// client's request for the ListObjectsV2 operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListObjectsV2 for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the ListObjectsV2 method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListObjectsV2Request method.
+// req, resp := client.ListObjectsV2Request(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
+func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) {
+ op := &request.Operation{
+ Name: opListObjectsV2,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?list-type=2",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"ContinuationToken"},
+ OutputTokens: []string{"NextContinuationToken"},
+ LimitToken: "MaxKeys",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListObjectsV2Input{}
+ }
+
+ output = &ListObjectsV2Output{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListObjectsV2 API operation for Amazon Simple Storage Service.
+//
+// Returns some or all (up to 1000) of the objects in a bucket. You can use
+// the request parameters as selection criteria to return a subset of the objects
+// in a bucket. Note: ListObjectsV2 is the revised List Objects API, and we
+// recommend you use it for new application development.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListObjectsV2 for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchBucket "NoSuchBucket"
+// The specified bucket does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
+func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) {
+ req, out := c.ListObjectsV2Request(input)
+ return out, req.Send()
+}
+
+// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjectsV2 for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) {
+ req, out := c.ListObjectsV2Request(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListObjectsV2 method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListObjectsV2 operation.
+// pageNum := 0
+// err := client.ListObjectsV2Pages(params,
+// func(page *ListObjectsV2Output, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error {
+ return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListObjectsV2Input
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListObjectsV2Request(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage())
+ }
+ return p.Err()
+}
+
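+// A minimal sketch of walking every key under a prefix with the paginated
+// helper, assuming an existing *s3.S3 client named svc; the bucket and prefix
+// are illustrative placeholders.
+//
+// var total int64
+// err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
+// Bucket: aws.String("my-bucket"), // placeholder
+// Prefix: aws.String("logs/"), // placeholder
+// }, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+// for _, obj := range page.Contents {
+// total += aws.Int64Value(obj.Size)
+// }
+// return true // keep paging until the last page
+// })
+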
+const opListParts = "ListParts"
+
+// ListPartsRequest generates a "aws/request.Request" representing the
+// client's request for the ListParts operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListParts for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the ListParts method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListPartsRequest method.
+// req, resp := client.ListPartsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
+func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) {
+ op := &request.Operation{
+ Name: opListParts,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"PartNumberMarker"},
+ OutputTokens: []string{"NextPartNumberMarker"},
+ LimitToken: "MaxParts",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListPartsInput{}
+ }
+
+ output = &ListPartsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListParts API operation for Amazon Simple Storage Service.
+//
+// Lists the parts that have been uploaded for a specific multipart upload.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListParts for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
+func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) {
+ req, out := c.ListPartsRequest(input)
+ return out, req.Send()
+}
+
+// ListPartsWithContext is the same as ListParts with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListParts for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) {
+ req, out := c.ListPartsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListPartsPages iterates over the pages of a ListParts operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListParts method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListParts operation.
+// pageNum := 0
+// err := client.ListPartsPages(params,
+// func(page *ListPartsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error {
+ return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListPartsPagesWithContext same as ListPartsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListPartsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListPartsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage())
+ }
+ return p.Err()
+}
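+
+// A short sketch of collecting every part across pages (input values are
+// placeholders; returning true from the callback keeps iterating until the
+// last page):
+//
+// var parts []*s3.Part
+// err := client.ListPartsPages(&s3.ListPartsInput{
+// Bucket: aws.String("my-bucket"),
+// Key: aws.String("my-key"),
+// UploadId: aws.String("my-upload-id"),
+// }, func(page *s3.ListPartsOutput, lastPage bool) bool {
+// parts = append(parts, page.Parts...)
+// return true // keep paging until IsTruncated is false
+// })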
+
+const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration"
+
+// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketAccelerateConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketAccelerateConfiguration for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketAccelerateConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketAccelerateConfigurationRequest method.
+// req, resp := client.PutBucketAccelerateConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration
+func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketAccelerateConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?accelerate",
+ }
+
+ if input == nil {
+ input = &PutBucketAccelerateConfigurationInput{}
+ }
+
+ output = &PutBucketAccelerateConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets the accelerate configuration of an existing bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAccelerateConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration
+func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) {
+ req, out := c.PutBucketAccelerateConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAccelerateConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) {
+ req, out := c.PutBucketAccelerateConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
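+
+// A minimal usage sketch (the bucket name is a placeholder). Transfer
+// Acceleration is toggled via the configuration's Status field:
+//
+// _, err := client.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
+// Bucket: aws.String("my-bucket"),
+// AccelerateConfiguration: &s3.AccelerateConfiguration{
+// Status: aws.String(s3.BucketAccelerateStatusEnabled),
+// },
+// })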
+
+const opPutBucketAcl = "PutBucketAcl"
+
+// PutBucketAclRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketAcl operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketAcl for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketAcl method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketAclRequest method.
+// req, resp := client.PutBucketAclRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
+func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) {
+ op := &request.Operation{
+ Name: opPutBucketAcl,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?acl",
+ }
+
+ if input == nil {
+ input = &PutBucketAclInput{}
+ }
+
+ output = &PutBucketAclOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketAcl API operation for Amazon Simple Storage Service.
+//
+// Sets the permissions on a bucket using access control lists (ACL).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAcl for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
+func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
+ req, out := c.PutBucketAclRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketAclWithContext is the same as PutBucketAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) {
+ req, out := c.PutBucketAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
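+
+// A minimal usage sketch applying a canned ACL (the bucket name is a
+// placeholder; fine-grained grants can be supplied via AccessControlPolicy
+// instead):
+//
+// _, err := client.PutBucketAcl(&s3.PutBucketAclInput{
+// Bucket: aws.String("my-bucket"),
+// ACL: aws.String(s3.BucketCannedACLPublicRead),
+// })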
+
+const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration"
+
+// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketAnalyticsConfiguration for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketAnalyticsConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method.
+// req, resp := client.PutBucketAnalyticsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration
+func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketAnalyticsConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?analytics",
+ }
+
+ if input == nil {
+ input = &PutBucketAnalyticsConfigurationInput{}
+ }
+
+ output = &PutBucketAnalyticsConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets an analytics configuration for the bucket (specified by the analytics
+// configuration ID).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAnalyticsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration
+func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.PutBucketAnalyticsConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAnalyticsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.PutBucketAnalyticsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketCors = "PutBucketCors"
+
+// PutBucketCorsRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketCors operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketCors for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketCors method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketCorsRequest method.
+// req, resp := client.PutBucketCorsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
+func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) {
+ op := &request.Operation{
+ Name: opPutBucketCors,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?cors",
+ }
+
+ if input == nil {
+ input = &PutBucketCorsInput{}
+ }
+
+ output = &PutBucketCorsOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketCors API operation for Amazon Simple Storage Service.
+//
+// Sets the cors configuration for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketCors for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
+func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) {
+ req, out := c.PutBucketCorsRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketCorsWithContext is the same as PutBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) {
+ req, out := c.PutBucketCorsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
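+
+// A minimal usage sketch allowing cross-origin GETs from any origin (all
+// values are placeholders for the example):
+//
+// _, err := client.PutBucketCors(&s3.PutBucketCorsInput{
+// Bucket: aws.String("my-bucket"),
+// CORSConfiguration: &s3.CORSConfiguration{
+// CORSRules: []*s3.CORSRule{{
+// AllowedMethods: []*string{aws.String("GET")},
+// AllowedOrigins: []*string{aws.String("*")},
+// }},
+// },
+// })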
+
+const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration"
+
+// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketInventoryConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketInventoryConfiguration for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketInventoryConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketInventoryConfigurationRequest method.
+// req, resp := client.PutBucketInventoryConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration
+func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketInventoryConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+
+ if input == nil {
+ input = &PutBucketInventoryConfigurationInput{}
+ }
+
+ output = &PutBucketInventoryConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
+//
+// Adds an inventory configuration (identified by the inventory ID) to the
+// bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketInventoryConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration
+func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) {
+ req, out := c.PutBucketInventoryConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketInventoryConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) {
+ req, out := c.PutBucketInventoryConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketLifecycle = "PutBucketLifecycle"
+
+// PutBucketLifecycleRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketLifecycle operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketLifecycle for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketLifecycle method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketLifecycleRequest method.
+// req, resp := client.PutBucketLifecycleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
+func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) {
+ if c.Client.Config.Logger != nil {
+ c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated")
+ }
+ op := &request.Operation{
+ Name: opPutBucketLifecycle,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &PutBucketLifecycleInput{}
+ }
+
+ output = &PutBucketLifecycleOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketLifecycle API operation for Amazon Simple Storage Service.
+//
+// Deprecated, see the PutBucketLifecycleConfiguration operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLifecycle for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
+func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
+ req, out := c.PutBucketLifecycleRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLifecycle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) {
+ req, out := c.PutBucketLifecycleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"
+
+// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketLifecycleConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketLifecycleConfiguration for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketLifecycleConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketLifecycleConfigurationRequest method.
+// req, resp := client.PutBucketLifecycleConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
+func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketLifecycleConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &PutBucketLifecycleConfigurationInput{}
+ }
+
+ output = &PutBucketLifecycleConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets lifecycle configuration for your bucket. If a lifecycle configuration
+// already exists, this operation replaces it.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLifecycleConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
+func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
+ req, out := c.PutBucketLifecycleConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLifecycleConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) {
+ req, out := c.PutBucketLifecycleConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
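+
+// A minimal usage sketch expiring objects under a hypothetical prefix after
+// 30 days (rule ID, prefix, and bucket name are placeholders):
+//
+// _, err := client.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+// Bucket: aws.String("my-bucket"),
+// LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+// Rules: []*s3.LifecycleRule{{
+// ID: aws.String("expire-tmp"),
+// Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("tmp/")},
+// Status: aws.String(s3.ExpirationStatusEnabled),
+// Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
+// }},
+// },
+// })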
+
+const opPutBucketLogging = "PutBucketLogging"
+
+// PutBucketLoggingRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketLogging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketLogging for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketLogging method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketLoggingRequest method.
+// req, resp := client.PutBucketLoggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
+ op := &request.Operation{
+ Name: opPutBucketLogging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?logging",
+ }
+
+ if input == nil {
+ input = &PutBucketLoggingInput{}
+ }
+
+ output = &PutBucketLoggingOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketLogging API operation for Amazon Simple Storage Service.
+//
+// Sets the logging parameters for a bucket and specifies permissions for who
+// can view and modify those parameters. To set the logging status of a
+// bucket, you must be the bucket owner.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLogging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
+ req, out := c.PutBucketLoggingRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLogging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) {
+ req, out := c.PutBucketLoggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
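+
+// A minimal usage sketch (target bucket and prefix are placeholders; the
+// target bucket must grant the log-delivery group write permission):
+//
+// _, err := client.PutBucketLogging(&s3.PutBucketLoggingInput{
+// Bucket: aws.String("my-bucket"),
+// BucketLoggingStatus: &s3.BucketLoggingStatus{
+// LoggingEnabled: &s3.LoggingEnabled{
+// TargetBucket: aws.String("my-log-bucket"),
+// TargetPrefix: aws.String("logs/"),
+// },
+// },
+// })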
+
+const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration"
+
+// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketMetricsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketMetricsConfiguration for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketMetricsConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketMetricsConfigurationRequest method.
+// req, resp := client.PutBucketMetricsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
+func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketMetricsConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?metrics",
+ }
+
+ if input == nil {
+ input = &PutBucketMetricsConfigurationInput{}
+ }
+
+ output = &PutBucketMetricsConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets a metrics configuration (specified by the metrics configuration ID)
+// for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketMetricsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
+func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) {
+ req, out := c.PutBucketMetricsConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketMetricsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) {
+ req, out := c.PutBucketMetricsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
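+
+// A minimal usage sketch (the configuration ID and bucket are placeholders;
+// note the Id appears both on the input and inside the configuration):
+//
+// _, err := client.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
+// Bucket: aws.String("my-bucket"),
+// Id: aws.String("entire-bucket"),
+// MetricsConfiguration: &s3.MetricsConfiguration{
+// Id: aws.String("entire-bucket"),
+// },
+// })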
+
+const opPutBucketNotification = "PutBucketNotification"
+
+// PutBucketNotificationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketNotification operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketNotification for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketNotification method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketNotificationRequest method.
+// req, resp := client.PutBucketNotificationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
+func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) {
+ if c.Client.Config.Logger != nil {
+ c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated")
+ }
+ op := &request.Operation{
+ Name: opPutBucketNotification,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &PutBucketNotificationInput{}
+ }
+
+ output = &PutBucketNotificationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketNotification API operation for Amazon Simple Storage Service.
+//
+// Deprecated, see the PutBucketNotificationConfiguration operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketNotification for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
+func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
+ req, out := c.PutBucketNotificationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketNotification for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) {
+ req, out := c.PutBucketNotificationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration"
+
+// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketNotificationConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketNotificationConfiguration for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketNotificationConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketNotificationConfigurationRequest method.
+// req, resp := client.PutBucketNotificationConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
+func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketNotificationConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &PutBucketNotificationConfigurationInput{}
+ }
+
+ output = &PutBucketNotificationConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
+//
+// Enables notifications of specified events for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketNotificationConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
+func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) {
+ req, out := c.PutBucketNotificationConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketNotificationConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) {
+ req, out := c.PutBucketNotificationConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
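+
+// A minimal usage sketch publishing object-created events to an SNS topic
+// (the bucket name, account ID, and topic ARN are placeholders):
+//
+// _, err := client.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+// Bucket: aws.String("my-bucket"),
+// NotificationConfiguration: &s3.NotificationConfiguration{
+// TopicConfigurations: []*s3.TopicConfiguration{{
+// TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"),
+// Events: []*string{aws.String(s3.EventS3ObjectCreated)},
+// }},
+// },
+// })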
+
+const opPutBucketPolicy = "PutBucketPolicy"
+
+// PutBucketPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketPolicy for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketPolicy method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketPolicyRequest method.
+// req, resp := client.PutBucketPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy
+func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) {
+ op := &request.Operation{
+ Name: opPutBucketPolicy,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?policy",
+ }
+
+ if input == nil {
+ input = &PutBucketPolicyInput{}
+ }
+
+ output = &PutBucketPolicyOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketPolicy API operation for Amazon Simple Storage Service.
+//
+// Replaces a policy on a bucket. If the bucket already has a policy, the one
+// in this request completely replaces it.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketPolicy for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy
+func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) {
+ req, out := c.PutBucketPolicyRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) {
+ req, out := c.PutBucketPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
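+
+// A minimal usage sketch (the bucket and the policy document are
+// placeholders; Policy takes the JSON policy document as a string):
+//
+// policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow",
+// "Principal":"*","Action":"s3:GetObject",
+// "Resource":"arn:aws:s3:::my-bucket/*"}]}`
+// _, err := client.PutBucketPolicy(&s3.PutBucketPolicyInput{
+// Bucket: aws.String("my-bucket"),
+// Policy: aws.String(policy),
+// })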
+
+const opPutBucketReplication = "PutBucketReplication"
+
+// PutBucketReplicationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketReplication operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketReplication for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketReplication method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketReplicationRequest method.
+// req, resp := client.PutBucketReplicationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication
+func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketReplication,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &PutBucketReplicationInput{}
+ }
+
+ output = &PutBucketReplicationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketReplication API operation for Amazon Simple Storage Service.
+//
+// Creates a new replication configuration (or replaces an existing one, if
+// present).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketReplication for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication
+func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
+ req, out := c.PutBucketReplicationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) {
+ req, out := c.PutBucketReplicationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
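+
+// A minimal usage sketch (role ARN, destination bucket ARN, and source
+// bucket are placeholders; versioning must already be enabled on both
+// buckets for replication to work):
+//
+// _, err := client.PutBucketReplication(&s3.PutBucketReplicationInput{
+// Bucket: aws.String("my-bucket"),
+// ReplicationConfiguration: &s3.ReplicationConfiguration{
+// Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
+// Rules: []*s3.ReplicationRule{{
+// Prefix: aws.String(""),
+// Status: aws.String(s3.ReplicationRuleStatusEnabled),
+// Destination: &s3.Destination{
+// Bucket: aws.String("arn:aws:s3:::my-replica-bucket"),
+// },
+// }},
+// },
+// })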
+
+const opPutBucketRequestPayment = "PutBucketRequestPayment"
+
+// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketRequestPayment operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketRequestPayment for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketRequestPayment method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketRequestPaymentRequest method.
+// req, resp := client.PutBucketRequestPaymentRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment
+func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) {
+ op := &request.Operation{
+ Name: opPutBucketRequestPayment,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?requestPayment",
+ }
+
+ if input == nil {
+ input = &PutBucketRequestPaymentInput{}
+ }
+
+ output = &PutBucketRequestPaymentOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketRequestPayment API operation for Amazon Simple Storage Service.
+//
+// Sets the request payment configuration for a bucket. By default, the bucket
+// owner pays for downloads from the bucket. This configuration lets the bucket
+// owner (and only the owner) specify that the person requesting the download
+// is charged for it. Documentation on requester pays buckets can be found at
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketRequestPayment for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment
+func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) {
+ req, out := c.PutBucketRequestPaymentRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketRequestPayment for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) {
+ req, out := c.PutBucketRequestPaymentRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
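+
+// A minimal usage sketch turning on requester-pays for a placeholder bucket:
+//
+// _, err := client.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
+// Bucket: aws.String("my-bucket"),
+// RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
+// Payer: aws.String(s3.PayerRequester),
+// },
+// })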
+
+const opPutBucketTagging = "PutBucketTagging"
+
+// PutBucketTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketTagging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketTagging for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketTagging method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketTaggingRequest method.
+// req, resp := client.PutBucketTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging
+func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) {
+ op := &request.Operation{
+ Name: opPutBucketTagging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?tagging",
+ }
+
+ if input == nil {
+ input = &PutBucketTaggingInput{}
+ }
+
+ output = &PutBucketTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketTagging API operation for Amazon Simple Storage Service.
+//
+// Sets the tags for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging
+func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) {
+ req, out := c.PutBucketTaggingRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) {
+ req, out := c.PutBucketTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
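+
+// A minimal usage sketch (bucket, keys, and values are placeholders; the
+// tag set supplied here replaces any existing tags on the bucket):
+//
+// _, err := client.PutBucketTagging(&s3.PutBucketTaggingInput{
+// Bucket: aws.String("my-bucket"),
+// Tagging: &s3.Tagging{
+// TagSet: []*s3.Tag{
+// {Key: aws.String("env"), Value: aws.String("dev")},
+// {Key: aws.String("team"), Value: aws.String("storage")},
+// },
+// },
+// })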
+
+const opPutBucketVersioning = "PutBucketVersioning"
+
+// PutBucketVersioningRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketVersioning operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketVersioning for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketVersioning method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketVersioningRequest method.
+// req, resp := client.PutBucketVersioningRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
+func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) {
+ op := &request.Operation{
+ Name: opPutBucketVersioning,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?versioning",
+ }
+
+ if input == nil {
+ input = &PutBucketVersioningInput{}
+ }
+
+ output = &PutBucketVersioningOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketVersioning API operation for Amazon Simple Storage Service.
+//
+// Sets the versioning state of an existing bucket. To set the versioning state,
+// you must be the bucket owner.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketVersioning for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
+func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) {
+ req, out := c.PutBucketVersioningRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketVersioning for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) {
+ req, out := c.PutBucketVersioningRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
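+
+// A minimal usage sketch enabling versioning on a placeholder bucket
+// (use s3.BucketVersioningStatusSuspended to suspend it instead):
+//
+// _, err := client.PutBucketVersioning(&s3.PutBucketVersioningInput{
+// Bucket: aws.String("my-bucket"),
+// VersioningConfiguration: &s3.VersioningConfiguration{
+// Status: aws.String(s3.BucketVersioningStatusEnabled),
+// },
+// })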
+
+const opPutBucketWebsite = "PutBucketWebsite"
+
+// PutBucketWebsiteRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketWebsite operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketWebsite for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the PutBucketWebsite method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketWebsiteRequest method.
+// req, resp := client.PutBucketWebsiteRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
+func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) {
+ op := &request.Operation{
+ Name: opPutBucketWebsite,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &PutBucketWebsiteInput{}
+ }
+
+ output = &PutBucketWebsiteOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketWebsite API operation for Amazon Simple Storage Service.
+//
+// Set the website configuration for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketWebsite for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
+func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) {
+ req, out := c.PutBucketWebsiteRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) {
+ req, out := c.PutBucketWebsiteRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutObject = "PutObject"
+
+// PutObjectRequest generates an "aws/request.Request" representing the
+// client's request for the PutObject operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutObject for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the PutObject method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutObjectRequest method.
+// req, resp := client.PutObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
+func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) {
+ op := &request.Operation{
+ Name: opPutObject,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &PutObjectInput{}
+ }
+
+ output = &PutObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutObject API operation for Amazon Simple Storage Service.
+//
+// Adds an object to a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObject for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
+func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
+ req, out := c.PutObjectRequest(input)
+ return out, req.Send()
+}
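+
+// A minimal usage sketch (not part of the generated documentation): a simple
+// PutObject call. The client "svc", bucket, key, and payload are hypothetical.
+//
+// resp, err := svc.PutObject(&s3.PutObjectInput{
+//     Bucket: aws.String("mybucket"),
+//     Key:    aws.String("mykey"),
+//     Body:   bytes.NewReader([]byte("hello world")),
+// })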
+
+// PutObjectWithContext is the same as PutObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) {
+ req, out := c.PutObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutObjectAcl = "PutObjectAcl"
+
+// PutObjectAclRequest generates an "aws/request.Request" representing the
+// client's request for the PutObjectAcl operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutObjectAcl for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the PutObjectAcl method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutObjectAclRequest method.
+// req, resp := client.PutObjectAclRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
+func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) {
+ op := &request.Operation{
+ Name: opPutObjectAcl,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}?acl",
+ }
+
+ if input == nil {
+ input = &PutObjectAclInput{}
+ }
+
+ output = &PutObjectAclOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutObjectAcl API operation for Amazon Simple Storage Service.
+//
+// Uses the acl subresource to set the access control list (ACL) permissions
+// for an object that already exists in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectAcl for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchKey "NoSuchKey"
+// The specified key does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
+func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
+ req, out := c.PutObjectAclRequest(input)
+ return out, req.Send()
+}
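+
+// A minimal usage sketch (not part of the generated documentation): inspecting
+// the returned awserr.Error with a runtime type assertion, as the comment
+// above suggests. The client "svc" and params are hypothetical.
+//
+// _, err := svc.PutObjectAcl(params)
+// if aerr, ok := err.(awserr.Error); ok {
+//     switch aerr.Code() {
+//     case s3.ErrCodeNoSuchKey:
+//         // the target object does not exist
+//     default:
+//         fmt.Println(aerr.Code(), aerr.Message())
+//     }
+// }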
+
+// PutObjectAclWithContext is the same as PutObjectAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) {
+ req, out := c.PutObjectAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutObjectTagging = "PutObjectTagging"
+
+// PutObjectTaggingRequest generates an "aws/request.Request" representing the
+// client's request for the PutObjectTagging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutObjectTagging for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the PutObjectTagging method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutObjectTaggingRequest method.
+// req, resp := client.PutObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
+func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opPutObjectTagging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &PutObjectTaggingInput{}
+ }
+
+ output = &PutObjectTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Applies the supplied tag-set to an object that already exists in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
+func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) {
+ req, out := c.PutObjectTaggingRequest(input)
+ return out, req.Send()
+}
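+
+// A minimal usage sketch (not part of the generated documentation): applying a
+// one-tag tag-set. The client "svc", bucket, key, and tag values are
+// hypothetical.
+//
+// _, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
+//     Bucket: aws.String("mybucket"),
+//     Key:    aws.String("mykey"),
+//     Tagging: &s3.Tagging{
+//         TagSet: []*s3.Tag{{Key: aws.String("env"), Value: aws.String("dev")}},
+//     },
+// })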
+
+// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) {
+ req, out := c.PutObjectTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opRestoreObject = "RestoreObject"
+
+// RestoreObjectRequest generates an "aws/request.Request" representing the
+// client's request for the RestoreObject operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See RestoreObject for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the RestoreObject method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RestoreObjectRequest method.
+// req, resp := client.RestoreObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
+ op := &request.Operation{
+ Name: opRestoreObject,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?restore",
+ }
+
+ if input == nil {
+ input = &RestoreObjectInput{}
+ }
+
+ output = &RestoreObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// RestoreObject API operation for Amazon Simple Storage Service.
+//
+// Restores an archived copy of an object back into Amazon S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation RestoreObject for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError"
+// This operation is not allowed against this storage tier.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
+ req, out := c.RestoreObjectRequest(input)
+ return out, req.Send()
+}
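+
+// A minimal usage sketch (not part of the generated documentation): requesting
+// a seven-day restore of an archived object. The client "svc", names, and day
+// count are hypothetical; RestoreRequest carries the restore parameters.
+//
+// _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//     Bucket: aws.String("mybucket"),
+//     Key:    aws.String("archived-key"),
+//     RestoreRequest: &s3.RestoreRequest{
+//         Days: aws.Int64(7),
+//     },
+// })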
+
+// RestoreObjectWithContext is the same as RestoreObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RestoreObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) {
+ req, out := c.RestoreObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUploadPart = "UploadPart"
+
+// UploadPartRequest generates an "aws/request.Request" representing the
+// client's request for the UploadPart operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See UploadPart for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the UploadPart method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UploadPartRequest method.
+// req, resp := client.UploadPartRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
+func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
+ op := &request.Operation{
+ Name: opUploadPart,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &UploadPartInput{}
+ }
+
+ output = &UploadPartOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UploadPart API operation for Amazon Simple Storage Service.
+//
+// Uploads a part in a multipart upload.
+//
+// Note: After you initiate a multipart upload and upload one or more parts,
+// you must either complete or abort the multipart upload in order to stop
+// getting charged for storage of the uploaded parts. Only after you complete
+// or abort the upload does Amazon S3 free up the parts storage and stop
+// charging you for it.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation UploadPart for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
+func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
+ req, out := c.UploadPartRequest(input)
+ return out, req.Send()
+}
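+
+// A minimal usage sketch (not part of the generated documentation): the
+// complete-or-abort lifecycle described in the note above. The client "svc",
+// names, and part data are hypothetical.
+//
+// mp, _ := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+//     Bucket: aws.String("mybucket"), Key: aws.String("mykey"),
+// })
+// part, err := svc.UploadPart(&s3.UploadPartInput{
+//     Bucket: aws.String("mybucket"), Key: aws.String("mykey"),
+//     UploadId: mp.UploadId, PartNumber: aws.Int64(1),
+//     Body: bytes.NewReader(data),
+// })
+// if err != nil {
+//     // Abort so S3 frees the stored parts and stops charging for them.
+//     svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
+//         Bucket: aws.String("mybucket"), Key: aws.String("mykey"),
+//         UploadId: mp.UploadId,
+//     })
+// } else {
+//     svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//         Bucket: aws.String("mybucket"), Key: aws.String("mykey"),
+//         UploadId: mp.UploadId,
+//         MultipartUpload: &s3.CompletedMultipartUpload{
+//             Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
+//         },
+//     })
+// }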
+
+// UploadPartWithContext is the same as UploadPart with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPart for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) {
+ req, out := c.UploadPartRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUploadPartCopy = "UploadPartCopy"
+
+// UploadPartCopyRequest generates an "aws/request.Request" representing the
+// client's request for the UploadPartCopy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See UploadPartCopy for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties
+// on the request object before or after sending the request. If you just
+// want the service response, call the UploadPartCopy method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UploadPartCopyRequest method.
+// req, resp := client.UploadPartCopyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
+func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
+ op := &request.Operation{
+ Name: opUploadPartCopy,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &UploadPartCopyInput{}
+ }
+
+ output = &UploadPartCopyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UploadPartCopy API operation for Amazon Simple Storage Service.
+//
+// Uploads a part by copying data from an existing object as the data source.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation UploadPartCopy for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
+func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
+ req, out := c.UploadPartCopyRequest(input)
+ return out, req.Send()
+}
+
+// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPartCopy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) {
+ req, out := c.UploadPartCopyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// Specifies the number of days after the initiation of an incomplete multipart
+// upload that Lifecycle will wait before permanently removing all parts of
+// the upload.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload
+type AbortIncompleteMultipartUpload struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates the number of days that must pass since initiation for Lifecycle
+ // to abort an incomplete multipart upload.
+ DaysAfterInitiation *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AbortIncompleteMultipartUpload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortIncompleteMultipartUpload) GoString() string {
+ return s.String()
+}
+
+// SetDaysAfterInitiation sets the DaysAfterInitiation field's value.
+func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload {
+ s.DaysAfterInitiation = &v
+ return s
+}
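+
+// A minimal usage sketch (not part of the generated documentation): attaching
+// this setting to a lifecycle rule so incomplete uploads are cleaned up after
+// seven days. The client "svc", rule ID, and bucket are hypothetical; field
+// names are assumed to follow the LifecycleRule type in this package.
+//
+// rule := &s3.LifecycleRule{
+//     ID:     aws.String("abort-incomplete"),
+//     Prefix: aws.String(""),
+//     Status: aws.String("Enabled"),
+//     AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
+//         DaysAfterInitiation: aws.Int64(7),
+//     },
+// }
+// _, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+//     Bucket: aws.String("mybucket"),
+//     LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+//         Rules: []*s3.LifecycleRule{rule},
+//     },
+// })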
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest
+type AbortMultipartUploadInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AbortMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AbortMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput {
+ s.UploadId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput
+type AbortMultipartUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s AbortMultipartUploadOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortMultipartUploadOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration
+type AccelerateConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The accelerate configuration of the bucket.
+ Status *string `type:"string" enum:"BucketAccelerateStatus"`
+}
+
+// String returns the string representation
+func (s AccelerateConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccelerateConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration {
+ s.Status = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy
+type AccessControlPolicy struct {
+ _ struct{} `type:"structure"`
+
+ // A list of grants.
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+ Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation
+func (s AccessControlPolicy) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessControlPolicy) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AccessControlPolicy) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"}
+ if s.Grants != nil {
+ for i, v := range s.Grants {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGrants sets the Grants field's value.
+func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy {
+ s.Grants = v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy {
+ s.Owner = v
+ return s
+}
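+
+// A minimal usage sketch (not part of the generated documentation): building
+// a policy with one grant and applying it to an object. The client "svc",
+// canonical IDs, and names are hypothetical.
+//
+// policy := &s3.AccessControlPolicy{
+//     Owner: &s3.Owner{ID: aws.String("owner-canonical-id")},
+//     Grants: []*s3.Grant{{
+//         Grantee:    &s3.Grantee{Type: aws.String("CanonicalUser"), ID: aws.String("grantee-canonical-id")},
+//         Permission: aws.String("READ"),
+//     }},
+// }
+// _, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
+//     Bucket:              aws.String("mybucket"),
+//     Key:                 aws.String("mykey"),
+//     AccessControlPolicy: policy,
+// })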
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator
+type AnalyticsAndOperator struct {
+ _ struct{} `type:"structure"`
+
+ // The prefix to use when evaluating an AND predicate.
+ Prefix *string `type:"string"`
+
+ // The list of tags to use when evaluating an AND predicate.
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s AnalyticsAndOperator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsAndOperator) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsAndOperator) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"}
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator {
+ s.Prefix = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator {
+ s.Tags = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration
+type AnalyticsConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The filter used to describe a set of objects for analyses. A filter must
+ // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator).
+ // If no filter is provided, all objects will be considered in any analysis.
+ Filter *AnalyticsFilter `type:"structure"`
+
+ // The identifier used to represent an analytics configuration.
+ //
+ // Id is a required field
+ Id *string `type:"string" required:"true"`
+
+ // If present, it indicates that data related to access patterns will be collected
+ // and made available to analyze the tradeoffs between different storage classes.
+ //
+ // StorageClassAnalysis is a required field
+ StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AnalyticsConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"}
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.StorageClassAnalysis == nil {
+ invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis"))
+ }
+ if s.Filter != nil {
+ if err := s.Filter.Validate(); err != nil {
+ invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.StorageClassAnalysis != nil {
+ if err := s.StorageClassAnalysis.Validate(); err != nil {
+ invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFilter sets the Filter field's value.
+func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetStorageClassAnalysis sets the StorageClassAnalysis field's value.
+func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration {
+ s.StorageClassAnalysis = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination
+type AnalyticsExportDestination struct {
+ _ struct{} `type:"structure"`
+
+ // A destination signifying output to an S3 bucket.
+ //
+ // S3BucketDestination is a required field
+ S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AnalyticsExportDestination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsExportDestination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsExportDestination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"}
+ if s.S3BucketDestination == nil {
+ invalidParams.Add(request.NewErrParamRequired("S3BucketDestination"))
+ }
+ if s.S3BucketDestination != nil {
+ if err := s.S3BucketDestination.Validate(); err != nil {
+ invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetS3BucketDestination sets the S3BucketDestination field's value.
+func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination {
+ s.S3BucketDestination = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter
+type AnalyticsFilter struct {
+ _ struct{} `type:"structure"`
+
+ // A conjunction (logical AND) of predicates, which is used in evaluating an
+ // analytics filter. The operator must have at least two predicates.
+ And *AnalyticsAndOperator `type:"structure"`
+
+ // The prefix to use when evaluating an analytics filter.
+ Prefix *string `type:"string"`
+
+ // The tag to use when evaluating an analytics filter.
+ Tag *Tag `type:"structure"`
+}
+
+// String returns the string representation
+func (s AnalyticsFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"}
+ if s.And != nil {
+ if err := s.And.Validate(); err != nil {
+ invalidParams.AddNested("And", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Tag != nil {
+ if err := s.Tag.Validate(); err != nil {
+ invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAnd sets the And field's value.
+func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter {
+ s.And = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter {
+ s.Prefix = &v
+ return s
+}
+
+// SetTag sets the Tag field's value.
+func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter {
+ s.Tag = v
+ return s
+}
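+
+// A minimal usage sketch (not part of the generated documentation): a filter
+// holding a single conjunction, which in turn carries the two or more
+// predicates the comment above requires. The prefix and tag values are
+// hypothetical.
+//
+// filter := &s3.AnalyticsFilter{
+//     And: &s3.AnalyticsAndOperator{
+//         Prefix: aws.String("documents/"),
+//         Tags:   []*s3.Tag{{Key: aws.String("class"), Value: aws.String("archive")}},
+//     },
+// }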
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination
+type AnalyticsS3BucketDestination struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the bucket to which data is exported.
+ //
+ // Bucket is a required field
+ Bucket *string `type:"string" required:"true"`
+
+ // The account ID that owns the destination bucket. If no account ID is provided,
+ // the owner will not be validated prior to exporting data.
+ BucketAccountId *string `type:"string"`
+
+ // The file format used when exporting data to Amazon S3.
+ //
+ // Format is a required field
+ Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"`
+
+ // The prefix to use when exporting data. The exported data begins with this
+ // prefix.
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AnalyticsS3BucketDestination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsS3BucketDestination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsS3BucketDestination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Format == nil {
+ invalidParams.Add(request.NewErrParamRequired("Format"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination {
+ s.Bucket = &v
+ return s
+}
+
+// SetBucketAccountId sets the BucketAccountId field's value.
+func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination {
+ s.BucketAccountId = &v
+ return s
+}
+
+// SetFormat sets the Format field's value.
+func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination {
+ s.Format = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination {
+ s.Prefix = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket
+type Bucket struct {
+ _ struct{} `type:"structure"`
+
+ // Date the bucket was created.
+ CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The name of the bucket.
+ Name *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Bucket) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Bucket) GoString() string {
+ return s.String()
+}
+
+// SetCreationDate sets the CreationDate field's value.
+func (s *Bucket) SetCreationDate(v time.Time) *Bucket {
+ s.CreationDate = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *Bucket) SetName(v string) *Bucket {
+ s.Name = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration
+type BucketLifecycleConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Rules is a required field
+ Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s BucketLifecycleConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BucketLifecycleConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BucketLifecycleConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"}
+ if s.Rules == nil {
+ invalidParams.Add(request.NewErrParamRequired("Rules"))
+ }
+ if s.Rules != nil {
+ for i, v := range s.Rules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRules sets the Rules field's value.
+func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration {
+ s.Rules = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus
+type BucketLoggingStatus struct {
+ _ struct{} `type:"structure"`
+
+ LoggingEnabled *LoggingEnabled `type:"structure"`
+}
+
+// String returns the string representation
+func (s BucketLoggingStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BucketLoggingStatus) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BucketLoggingStatus) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"}
+ if s.LoggingEnabled != nil {
+ if err := s.LoggingEnabled.Validate(); err != nil {
+ invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetLoggingEnabled sets the LoggingEnabled field's value.
+func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus {
+ s.LoggingEnabled = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration
+type CORSConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // CORSRules is a required field
+ CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s CORSConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CORSConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CORSConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"}
+ if s.CORSRules == nil {
+ invalidParams.Add(request.NewErrParamRequired("CORSRules"))
+ }
+ if s.CORSRules != nil {
+ for i, v := range s.CORSRules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCORSRules sets the CORSRules field's value.
+func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration {
+ s.CORSRules = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule
+type CORSRule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies which headers are allowed in a pre-flight OPTIONS request.
+ AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"`
+
+ // Identifies HTTP methods that the domain/origin specified in the rule is allowed
+ // to execute.
+ //
+ // AllowedMethods is a required field
+ AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"`
+
+ // One or more origins you want customers to be able to access the bucket from.
+ //
+ // AllowedOrigins is a required field
+ AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"`
+
+ // One or more headers in the response that you want customers to be able to
+ // access from their applications (for example, from a JavaScript XMLHttpRequest
+ // object).
+ ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"`
+
+ // The time in seconds that your browser is to cache the preflight response
+ // for the specified resource.
+ MaxAgeSeconds *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s CORSRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CORSRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CORSRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CORSRule"}
+ if s.AllowedMethods == nil {
+ invalidParams.Add(request.NewErrParamRequired("AllowedMethods"))
+ }
+ if s.AllowedOrigins == nil {
+ invalidParams.Add(request.NewErrParamRequired("AllowedOrigins"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAllowedHeaders sets the AllowedHeaders field's value.
+func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule {
+ s.AllowedHeaders = v
+ return s
+}
+
+// SetAllowedMethods sets the AllowedMethods field's value.
+func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule {
+ s.AllowedMethods = v
+ return s
+}
+
+// SetAllowedOrigins sets the AllowedOrigins field's value.
+func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule {
+ s.AllowedOrigins = v
+ return s
+}
+
+// SetExposeHeaders sets the ExposeHeaders field's value.
+func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule {
+ s.ExposeHeaders = v
+ return s
+}
+
+// SetMaxAgeSeconds sets the MaxAgeSeconds field's value.
+func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule {
+ s.MaxAgeSeconds = &v
+ return s
+}
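+
+// A minimal usage sketch (not part of the generated documentation): a
+// single-rule CORS configuration applied with PutBucketCors. The client
+// "svc", origin, and bucket are hypothetical.
+//
+// _, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
+//     Bucket: aws.String("mybucket"),
+//     CORSConfiguration: &s3.CORSConfiguration{
+//         CORSRules: []*s3.CORSRule{{
+//             AllowedMethods: []*string{aws.String("GET")},
+//             AllowedOrigins: []*string{aws.String("https://example.com")},
+//             MaxAgeSeconds:  aws.Int64(3000),
+//         }},
+//     },
+// })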
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration
+type CloudFunctionConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ CloudFunction *string `type:"string"`
+
+ // Bucket event for which to send notifications.
+ Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ InvocationRole *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CloudFunctionConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CloudFunctionConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetCloudFunction sets the CloudFunction field's value.
+func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration {
+ s.CloudFunction = &v
+ return s
+}
+
+// SetEvent sets the Event field's value.
+func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration {
+ s.Event = &v
+ return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration {
+ s.Events = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetInvocationRole sets the InvocationRole field's value.
+func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration {
+ s.InvocationRole = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix
+type CommonPrefix struct {
+ _ struct{} `type:"structure"`
+
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CommonPrefix) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CommonPrefix) GoString() string {
+ return s.String()
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix {
+ s.Prefix = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest
+type CompleteMultipartUploadInput struct {
+ _ struct{} `type:"structure" payload:"MultipartUpload"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CompleteMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CompleteMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput {
+ s.Key = &v
+ return s
+}
+
+// SetMultipartUpload sets the MultipartUpload field's value.
+func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput {
+ s.MultipartUpload = v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput {
+ s.UploadId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput
+type CompleteMultipartUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `type:"string"`
+
+ // Entity tag of the object.
+ ETag *string `type:"string"`
+
+ // If the object expiration is configured, this will contain the expiration
+ // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ Key *string `min:"1" type:"string"`
+
+ Location *string `type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // Version of the object.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s CompleteMultipartUploadOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteMultipartUploadOutput) GoString() string {
+ return s.String()
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput {
+ s.Bucket = &v
+ return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput {
+ s.Key = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput {
+ s.Location = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload
+type CompletedMultipartUpload struct {
+ _ struct{} `type:"structure"`
+
+ Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s CompletedMultipartUpload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompletedMultipartUpload) GoString() string {
+ return s.String()
+}
+
+// SetParts sets the Parts field's value.
+func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload {
+ s.Parts = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart
+type CompletedPart struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag returned when the part was uploaded.
+ ETag *string `type:"string"`
+
+ // Part number that identifies the part. This is a positive integer between
+ // 1 and 10,000.
+ PartNumber *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s CompletedPart) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompletedPart) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *CompletedPart) SetETag(v string) *CompletedPart {
+ s.ETag = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
+ s.PartNumber = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition
+type Condition struct {
+ _ struct{} `type:"structure"`
+
+ // The HTTP error code when the redirect is applied. In the event of an error,
+ // if the error code equals this value, then the specified redirect is applied.
+ // Required when parent element Condition is specified and sibling KeyPrefixEquals
+ // is not specified. If both are specified, then both must be true for the redirect
+ // to be applied.
+ HttpErrorCodeReturnedEquals *string `type:"string"`
+
+ // The object key name prefix when the redirect is applied. For example, to
+ // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
+ // To redirect requests for all pages with the prefix docs/, the key prefix will
+ // be docs/, which identifies all objects in the docs/ folder. Required when
+ // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
+ // is not specified. If both conditions are specified, both must be true for
+ // the redirect to be applied.
+ KeyPrefixEquals *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Condition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Condition) GoString() string {
+ return s.String()
+}
+
+// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value.
+func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition {
+ s.HttpErrorCodeReturnedEquals = &v
+ return s
+}
+
+// SetKeyPrefixEquals sets the KeyPrefixEquals field's value.
+func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
+ s.KeyPrefixEquals = &v
+ return s
+}
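+
+// A minimal usage sketch (not part of the generated documentation): using
+// Condition in a website routing rule that rewrites the docs/ prefix. The
+// client "svc", names, and prefixes are hypothetical.
+//
+// _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
+//     Bucket: aws.String("mybucket"),
+//     WebsiteConfiguration: &s3.WebsiteConfiguration{
+//         IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
+//         RoutingRules: []*s3.RoutingRule{{
+//             Condition: &s3.Condition{KeyPrefixEquals: aws.String("docs/")},
+//             Redirect:  &s3.Redirect{ReplaceKeyPrefixWith: aws.String("documents/")},
+//         }},
+//     },
+// })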
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest
+type CopyObjectInput struct {
+ _ struct{} `type:"structure"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The name of the source bucket and key name of the source object, separated
+ // by a slash (/). Must be URL-encoded.
+ //
+ // CopySource is a required field
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+ // Copies the object if it has been modified since the specified time.
+ CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Copies the object if its entity tag (ETag) is different than the specified
+ // ETag.
+ CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+ // Copies the object if it hasn't been modified since the specified time.
+ CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+ CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one
+ // that was used when the source object was created.
+ CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether the metadata is copied from the source object or replaced
+ // with metadata provided in the request.
+ MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the destination object; this value must be used in conjunction
+ // with the TaggingDirective. The tag-set must be encoded as URL query parameters.
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // Specifies whether the object tag-set is copied from the source object or
+ // replaced with the tag-set provided in the request.
+ TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CopyObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CopyObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.CopySource == nil {
+ invalidParams.Add(request.NewErrParamRequired("CopySource"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput {
+ s.ContentType = &v
+ return s
+}
+
+// SetCopySource sets the CopySource field's value.
+func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput {
+ s.CopySource = &v
+ return s
+}
+
+// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
+func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput {
+ s.CopySourceIfMatch = &v
+ return s
+}
+
+// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value.
+func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput {
+ s.CopySourceIfModifiedSince = &v
+ return s
+}
+
+// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value.
+func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput {
+ s.CopySourceIfNoneMatch = &v
+ return s
+}
+
+// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value.
+func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput {
+ s.CopySourceIfUnmodifiedSince = &v
+ return s
+}
+
+// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value.
+func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput {
+ s.CopySourceSSECustomerAlgorithm = &v
+ return s
+}
+
+// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value.
+func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput {
+ s.CopySourceSSECustomerKey = &v
+ return s
+}
+
+// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value.
+func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput {
+ s.CopySourceSSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput {
+ s.Expires = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput {
+ s.Metadata = v
+ return s
+}
+
+// SetMetadataDirective sets the MetadataDirective field's value.
+func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput {
+ s.MetadataDirective = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput {
+ s.Tagging = &v
+ return s
+}
+
+// SetTaggingDirective sets the TaggingDirective field's value.
+func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput {
+ s.TaggingDirective = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
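+
+// A minimal usage sketch (not part of the generated API docs): the chained
+// setters above compose a copy request. The bucket and key names are
+// hypothetical, and svc stands for an *S3 client constructed elsewhere.
+//
+//   input := (&CopyObjectInput{}).
+//       SetBucket("destination-bucket").
+//       SetCopySource("source-bucket/source-key").
+//       SetKey("destination-key").
+//       SetMetadataDirective("COPY")
+//   out, err := svc.CopyObject(input)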
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput
+type CopyObjectOutput struct {
+ _ struct{} `type:"structure" payload:"CopyObjectResult"`
+
+ CopyObjectResult *CopyObjectResult `type:"structure"`
+
+ CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
+
+ // If the object expiration is configured, the response includes this header.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // Version ID of the newly created copy.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s CopyObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetCopyObjectResult sets the CopyObjectResult field's value.
+func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput {
+ s.CopyObjectResult = v
+ return s
+}
+
+// SetCopySourceVersionId sets the CopySourceVersionId field's value.
+func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput {
+ s.CopySourceVersionId = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult
+type CopyObjectResult struct {
+ _ struct{} `type:"structure"`
+
+ ETag *string `type:"string"`
+
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s CopyObjectResult) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyObjectResult) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult {
+ s.ETag = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult {
+ s.LastModified = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult
+type CopyPartResult struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag of the object.
+ ETag *string `type:"string"`
+
+ // Date and time at which the object was uploaded.
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s CopyPartResult) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyPartResult) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *CopyPartResult) SetETag(v string) *CopyPartResult {
+ s.ETag = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult {
+ s.LastModified = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration
+type CreateBucketConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the region where the bucket will be created. If you don't specify
+ // a region, the bucket will be created in US Standard.
+ LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
+}
+
+// String returns the string representation
+func (s CreateBucketConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetLocationConstraint sets the LocationConstraint field's value.
+func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration {
+ s.LocationConstraint = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest
+type CreateBucketInput struct {
+ _ struct{} `type:"structure" payload:"CreateBucketConfiguration"`
+
+ // The canned ACL to apply to the bucket.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"`
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value.
+func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput {
+ s.CreateBucketConfiguration = v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput {
+ s.GrantWrite = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput {
+ s.GrantWriteACP = &v
+ return s
+}
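+
+// A minimal usage sketch (not part of the generated API docs): creating a
+// bucket outside the default region by attaching a CreateBucketConfiguration.
+// The bucket name and region are hypothetical; svc stands for an *S3 client.
+//
+//   cfg := (&CreateBucketConfiguration{}).SetLocationConstraint("eu-west-1")
+//   input := (&CreateBucketInput{}).
+//       SetBucket("example-bucket").
+//       SetCreateBucketConfiguration(cfg)
+//   out, err := svc.CreateBucket(input)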
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput
+type CreateBucketOutput struct {
+ _ struct{} `type:"structure"`
+
+ Location *string `location:"header" locationName:"Location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketOutput) GoString() string {
+ return s.String()
+}
+
+// SetLocation sets the Location field's value.
+func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput {
+ s.Location = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest
+type CreateMultipartUploadInput struct {
+ _ struct{} `type:"structure"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput {
+ s.ContentType = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput {
+ s.Expires = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput {
+ s.Key = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput {
+ s.Metadata = v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
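+
+// A minimal usage sketch (not part of the generated API docs): initiating a
+// multipart upload with SSE-KMS. The bucket, key, and kmsKeyID values are
+// hypothetical; svc stands for an *S3 client.
+//
+//   input := (&CreateMultipartUploadInput{}).
+//       SetBucket("example-bucket").
+//       SetKey("large-object").
+//       SetServerSideEncryption("aws:kms").
+//       SetSSEKMSKeyId(kmsKeyID)
+//   out, err := svc.CreateMultipartUpload(input)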
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput
+type CreateMultipartUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Date when the multipart upload will become eligible for the abort operation
+ // by lifecycle.
+ AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Id of the lifecycle rule that makes a multipart upload eligible for the
+ // abort operation.
+ AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `locationName:"Bucket" type:"string"`
+
+ // Object key for which the multipart upload was initiated.
+ Key *string `min:"1" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // ID for the initiated multipart upload.
+ UploadId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateMultipartUploadOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMultipartUploadOutput) GoString() string {
+ return s.String()
+}
+
+// SetAbortDate sets the AbortDate field's value.
+func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput {
+ s.AbortDate = &v
+ return s
+}
+
+// SetAbortRuleId sets the AbortRuleId field's value.
+func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput {
+ s.AbortRuleId = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput {
+ s.UploadId = &v
+ return s
+}
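+
+// A minimal usage sketch (not part of the generated API docs): the UploadId
+// returned here threads through the rest of the multipart flow (UploadPart,
+// then CompleteMultipartUpload). out is the hypothetical output of the call
+// above.
+//
+//   uploadID := aws.StringValue(out.UploadId)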
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete
+type Delete struct {
+ _ struct{} `type:"structure"`
+
+ // Objects is a required field
+ Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"`
+
+ // Element to enable quiet mode for the request. When you add this element,
+ // you must set its value to true.
+ Quiet *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s Delete) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Delete) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Delete) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Delete"}
+ if s.Objects == nil {
+ invalidParams.Add(request.NewErrParamRequired("Objects"))
+ }
+ if s.Objects != nil {
+ for i, v := range s.Objects {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetObjects sets the Objects field's value.
+func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete {
+ s.Objects = v
+ return s
+}
+
+// SetQuiet sets the Quiet field's value.
+func (s *Delete) SetQuiet(v bool) *Delete {
+ s.Quiet = &v
+ return s
+}
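+
+// A minimal usage sketch (not part of the generated API docs): a quiet batch
+// delete of two hypothetical keys. ObjectIdentifier and its SetKey setter are
+// defined elsewhere in this file.
+//
+//   del := (&Delete{}).
+//       SetObjects([]*ObjectIdentifier{
+//           (&ObjectIdentifier{}).SetKey("a.txt"),
+//           (&ObjectIdentifier{}).SetKey("b.txt"),
+//       }).
+//       SetQuiet(true)
+//   if err := del.Validate(); err != nil {
+//       // handle invalid parameters before calling DeleteObjects
+//   }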
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest
+type DeleteBucketAnalyticsConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket from which an analytics configuration is deleted.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The identifier used to represent an analytics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketAnalyticsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketAnalyticsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput {
+ s.Id = &v
+ return s
+}
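+
+// A minimal usage sketch (not part of the generated API docs): the id-scoped
+// delete inputs in this file share this shape; the bucket name and id below
+// are hypothetical.
+//
+//   input := (&DeleteBucketAnalyticsConfigurationInput{}).
+//       SetBucket("example-bucket").
+//       SetId("analytics-config-1")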
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationOutput
+type DeleteBucketAnalyticsConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketAnalyticsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest
+type DeleteBucketCorsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketCorsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketCorsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketCorsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput
+type DeleteBucketCorsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketCorsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketCorsOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest
+type DeleteBucketInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput {
+ s.Bucket = &v
+ return s
+}
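+
+// A minimal usage sketch (not part of the generated API docs): the bucket-only
+// delete inputs (cors, lifecycle, policy, replication, tagging, website) all
+// reduce to this pattern, with a hypothetical bucket name and *S3 client svc.
+//
+//   input := (&DeleteBucketInput{}).SetBucket("example-bucket")
+//   out, err := svc.DeleteBucket(input)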
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest
+type DeleteBucketInventoryConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket containing the inventory configuration to delete.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the inventory configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketInventoryConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketInventoryConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketInventoryConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationOutput
+type DeleteBucketInventoryConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketInventoryConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketInventoryConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest
+type DeleteBucketLifecycleInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketLifecycleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketLifecycleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketLifecycleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput
+type DeleteBucketLifecycleOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketLifecycleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketLifecycleOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest
+type DeleteBucketMetricsConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket containing the metrics configuration to delete.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the metrics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketMetricsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketMetricsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketMetricsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationOutput
+type DeleteBucketMetricsConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketMetricsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketMetricsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOutput
+type DeleteBucketOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest
+type DeleteBucketPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput
+type DeleteBucketPolicyOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketPolicyOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest
+type DeleteBucketReplicationInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketReplicationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketReplicationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketReplicationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput
+type DeleteBucketReplicationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketReplicationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketReplicationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest
+type DeleteBucketTaggingInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput
+type DeleteBucketTaggingOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest
+type DeleteBucketWebsiteInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketWebsiteInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketWebsiteInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketWebsiteInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput
+type DeleteBucketWebsiteOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketWebsiteOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketWebsiteOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry
+type DeleteMarkerEntry struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether the object is (true) or is not (false) the latest version
+ // of the object.
+ IsLatest *bool `type:"boolean"`
+
+ // The object key.
+ Key *string `min:"1" type:"string"`
+
+ // Date and time the object was last modified.
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ Owner *Owner `type:"structure"`
+
+ // Version ID of an object.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteMarkerEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMarkerEntry) GoString() string {
+ return s.String()
+}
+
+// SetIsLatest sets the IsLatest field's value.
+func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry {
+ s.IsLatest = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry {
+ s.Key = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry {
+ s.Owner = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest
+type DeleteObjectInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput {
+ s.MFA = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput {
+ s.VersionId = &v
+ return s
+}
+
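+// exampleDeleteObjectVersion is an editorial sketch (not generated API code)
+// showing how the fluent setters above can be chained to delete a single
+// object version; the bucket and key names are hypothetical.
+func exampleDeleteObjectVersion(svc *S3, versionID string) error {
+ input := (&DeleteObjectInput{}).
+ SetBucket("example-bucket").
+ SetKey("path/to/object").
+ SetVersionId(versionID)
+ _, err := svc.DeleteObject(input)
+ return err
+}
+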
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput
+type DeleteObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether the versioned object that was permanently deleted was (true)
+ // or was not (false) a delete marker.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // Returns the version ID of the delete marker created as a result of the DELETE
+ // operation.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest
+type DeleteObjectTaggingInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // The versionId of the object that the tag-set will be removed from.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput
+type DeleteObjectTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The versionId of the object the tag-set was removed from.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest
+type DeleteObjectsInput struct {
+ _ struct{} `type:"structure" payload:"Delete"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Delete is a required field
+ Delete *Delete `locationName:"Delete" type:"structure" required:"true"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation
+func (s DeleteObjectsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Delete == nil {
+ invalidParams.Add(request.NewErrParamRequired("Delete"))
+ }
+ if s.Delete != nil {
+ if err := s.Delete.Validate(); err != nil {
+ invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetDelete sets the Delete field's value.
+func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput {
+ s.Delete = v
+ return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput {
+ s.MFA = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput
+type DeleteObjectsOutput struct {
+ _ struct{} `type:"structure"`
+
+ Deleted []*DeletedObject `type:"list" flattened:"true"`
+
+ Errors []*Error `locationName:"Error" type:"list" flattened:"true"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s DeleteObjectsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectsOutput) GoString() string {
+ return s.String()
+}
+
+// SetDeleted sets the Deleted field's value.
+func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput {
+ s.Deleted = v
+ return s
+}
+
+// SetErrors sets the Errors field's value.
+func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput {
+ s.Errors = v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput {
+ s.RequestCharged = &v
+ return s
+}
+
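+// exampleBatchDelete is an editorial sketch (not generated API code) showing
+// how a batch delete request is assembled from ObjectIdentifier values and
+// how per-key failures are read back; the bucket name is hypothetical.
+func exampleBatchDelete(svc *S3, keys []string) ([]*Error, error) {
+ objects := make([]*ObjectIdentifier, 0, len(keys))
+ for _, k := range keys {
+ objects = append(objects, (&ObjectIdentifier{}).SetKey(k))
+ }
+ input := (&DeleteObjectsInput{}).
+ SetBucket("example-bucket").
+ SetDelete((&Delete{}).SetObjects(objects))
+ out, err := svc.DeleteObjects(input)
+ if err != nil {
+ return nil, err
+ }
+ // Errors holds one entry per key that could not be deleted.
+ return out.Errors, nil
+}
+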
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject
+type DeletedObject struct {
+ _ struct{} `type:"structure"`
+
+ DeleteMarker *bool `type:"boolean"`
+
+ DeleteMarkerVersionId *string `type:"string"`
+
+ Key *string `min:"1" type:"string"`
+
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeletedObject) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeletedObject) GoString() string {
+ return s.String()
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value.
+func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject {
+ s.DeleteMarkerVersionId = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeletedObject) SetKey(v string) *DeletedObject {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeletedObject) SetVersionId(v string) *DeletedObject {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination
+type Destination struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to
+ // store replicas of the object identified by the rule.
+ //
+ // Bucket is a required field
+ Bucket *string `type:"string" required:"true"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"StorageClass"`
+}
+
+// String returns the string representation
+func (s Destination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Destination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Destination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Destination"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *Destination) SetBucket(v string) *Destination {
+ s.Bucket = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Destination) SetStorageClass(v string) *Destination {
+ s.StorageClass = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error
+type Error struct {
+ _ struct{} `type:"structure"`
+
+ Code *string `type:"string"`
+
+ Key *string `min:"1" type:"string"`
+
+ Message *string `type:"string"`
+
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Error) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Error) GoString() string {
+ return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *Error) SetCode(v string) *Error {
+ s.Code = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Error) SetKey(v string) *Error {
+ s.Key = &v
+ return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *Error) SetMessage(v string) *Error {
+ s.Message = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *Error) SetVersionId(v string) *Error {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument
+type ErrorDocument struct {
+ _ struct{} `type:"structure"`
+
+ // The object key name to use when a 4XX class error occurs.
+ //
+ // Key is a required field
+ Key *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ErrorDocument) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ErrorDocument) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ErrorDocument) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *ErrorDocument) SetKey(v string) *ErrorDocument {
+ s.Key = &v
+ return s
+}
+
+// Container for key value pair that defines the criteria for the filter rule.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule
+type FilterRule struct {
+ _ struct{} `type:"structure"`
+
+ // Object key name prefix or suffix identifying one or more objects to which
+ // the filtering rule applies. The maximum prefix length is 1,024 characters.
+ // Overlapping prefixes and suffixes are not supported. For more information,
+ // see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+ Name *string `type:"string" enum:"FilterRuleName"`
+
+ Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s FilterRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FilterRule) GoString() string {
+ return s.String()
+}
+
+// SetName sets the Name field's value.
+func (s *FilterRule) SetName(v string) *FilterRule {
+ s.Name = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *FilterRule) SetValue(v string) *FilterRule {
+ s.Value = &v
+ return s
+}
+
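+// examplePrefixRule is an editorial sketch (not generated API code) showing a
+// filter rule that scopes event notifications to keys under a hypothetical
+// "logs/" prefix, using the generated FilterRuleName enum constant.
+func examplePrefixRule() *FilterRule {
+ return (&FilterRule{}).SetName(FilterRuleNamePrefix).SetValue("logs/")
+}
+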
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest
+type GetBucketAccelerateConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the bucket for which the accelerate configuration is retrieved.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketAccelerateConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAccelerateConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketAccelerateConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput
+type GetBucketAccelerateConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The accelerate configuration of the bucket.
+ Status *string `type:"string" enum:"BucketAccelerateStatus"`
+}
+
+// String returns the string representation
+func (s GetBucketAccelerateConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAccelerateConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput {
+ s.Status = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest
+type GetBucketAclInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketAclInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput
+type GetBucketAclOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of grants.
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+ Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAclOutput) GoString() string {
+ return s.String()
+}
+
+// SetGrants sets the Grants field's value.
+func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput {
+ s.Grants = v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput {
+ s.Owner = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest
+type GetBucketAnalyticsConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket from which an analytics configuration is retrieved.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The identifier used to represent an analytics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketAnalyticsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAnalyticsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketAnalyticsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput
+type GetBucketAnalyticsConfigurationOutput struct {
+ _ struct{} `type:"structure" payload:"AnalyticsConfiguration"`
+
+ // The configuration and any analyses for the analytics filter.
+ AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketAnalyticsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAnalyticsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value.
+func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput {
+ s.AnalyticsConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest
+type GetBucketCorsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketCorsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketCorsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketCorsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput
+type GetBucketCorsOutput struct {
+ _ struct{} `type:"structure"`
+
+ CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketCorsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketCorsOutput) GoString() string {
+ return s.String()
+}
+
+// SetCORSRules sets the CORSRules field's value.
+func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput {
+ s.CORSRules = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest
+type GetBucketInventoryConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket containing the inventory configuration to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the inventory configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketInventoryConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketInventoryConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketInventoryConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput
+type GetBucketInventoryConfigurationOutput struct {
+ _ struct{} `type:"structure" payload:"InventoryConfiguration"`
+
+ // Specifies the inventory configuration.
+ InventoryConfiguration *InventoryConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketInventoryConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketInventoryConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetInventoryConfiguration sets the InventoryConfiguration field's value.
+func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput {
+ s.InventoryConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest
+type GetBucketLifecycleConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLifecycleConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput
+type GetBucketLifecycleConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+
+ Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetRules sets the Rules field's value.
+func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput {
+ s.Rules = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest
+type GetBucketLifecycleInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLifecycleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput
+type GetBucketLifecycleOutput struct {
+ _ struct{} `type:"structure"`
+
+ Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleOutput) GoString() string {
+ return s.String()
+}
+
+// SetRules sets the Rules field's value.
+func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput {
+ s.Rules = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest
+type GetBucketLocationInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLocationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLocationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLocationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput
+type GetBucketLocationOutput struct {
+ _ struct{} `type:"structure"`
+
+ LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
+}
+
+// String returns the string representation
+func (s GetBucketLocationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLocationOutput) GoString() string {
+ return s.String()
+}
+
+// SetLocationConstraint sets the LocationConstraint field's value.
+func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput {
+ s.LocationConstraint = &v
+ return s
+}
+
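+// exampleBucketRegion is an editorial sketch (not generated API code) showing
+// how to resolve a bucket's region; note that S3 reports an empty
+// LocationConstraint for buckets in us-east-1.
+func exampleBucketRegion(svc *S3, bucket string) (string, error) {
+ out, err := svc.GetBucketLocation((&GetBucketLocationInput{}).SetBucket(bucket))
+ if err != nil {
+ return "", err
+ }
+ if out.LocationConstraint == nil || *out.LocationConstraint == "" {
+ return "us-east-1", nil
+ }
+ return *out.LocationConstraint, nil
+}
+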
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest
+type GetBucketLoggingInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLoggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLoggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLoggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput
+type GetBucketLoggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ LoggingEnabled *LoggingEnabled `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketLoggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLoggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetLoggingEnabled sets the LoggingEnabled field's value.
+func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput {
+ s.LoggingEnabled = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest
+type GetBucketMetricsConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket containing the metrics configuration to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the metrics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketMetricsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketMetricsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketMetricsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput
+type GetBucketMetricsConfigurationOutput struct {
+ _ struct{} `type:"structure" payload:"MetricsConfiguration"`
+
+ // Specifies the metrics configuration.
+ MetricsConfiguration *MetricsConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketMetricsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketMetricsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetMetricsConfiguration sets the MetricsConfiguration field's value.
+func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput {
+ s.MetricsConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest
+type GetBucketNotificationConfigurationRequest struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the bucket to get the notification configuration for.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketNotificationConfigurationRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketNotificationConfigurationRequest) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketNotificationConfigurationRequest) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest
+type GetBucketPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput
+type GetBucketPolicyOutput struct {
+ _ struct{} `type:"structure" payload:"Policy"`
+
+ // The bucket policy as a JSON document.
+ Policy *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetBucketPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketPolicyOutput) GoString() string {
+ return s.String()
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput {
+ s.Policy = &v
+ return s
+}
+
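+// exampleBucketPolicyJSON is an editorial sketch (not generated API code)
+// showing how the policy document is read from the payload field; a bucket
+// with no policy typically yields a NoSuchBucketPolicy error rather than an
+// empty document.
+func exampleBucketPolicyJSON(svc *S3, bucket string) (string, error) {
+ out, err := svc.GetBucketPolicy((&GetBucketPolicyInput{}).SetBucket(bucket))
+ if err != nil {
+ return "", err
+ }
+ if out.Policy == nil {
+ return "", nil
+ }
+ return *out.Policy, nil
+}
+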
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest
+type GetBucketReplicationInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketReplicationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketReplicationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketReplicationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput
+type GetBucketReplicationOutput struct {
+ _ struct{} `type:"structure" payload:"ReplicationConfiguration"`
+
+ // Container for replication rules. You can add as many as 1,000 rules. The
+ // total size of the replication configuration can be up to 2 MB.
+ ReplicationConfiguration *ReplicationConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketReplicationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketReplicationOutput) GoString() string {
+ return s.String()
+}
+
+// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
+func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput {
+ s.ReplicationConfiguration = v
+ return s
+}
+
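+// exampleReplicationTargets is an editorial sketch (not generated API code)
+// that collects the destination bucket ARNs from a bucket's replication
+// rules, guarding each optional pointer along the way.
+func exampleReplicationTargets(svc *S3, bucket string) ([]string, error) {
+ out, err := svc.GetBucketReplication((&GetBucketReplicationInput{}).SetBucket(bucket))
+ if err != nil {
+ return nil, err
+ }
+ var targets []string
+ if out.ReplicationConfiguration == nil {
+ return targets, nil
+ }
+ for _, rule := range out.ReplicationConfiguration.Rules {
+ if rule.Destination != nil && rule.Destination.Bucket != nil {
+ targets = append(targets, *rule.Destination.Bucket)
+ }
+ }
+ return targets, nil
+}
+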
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest
+type GetBucketRequestPaymentInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketRequestPaymentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketRequestPaymentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketRequestPaymentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput
+type GetBucketRequestPaymentOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies who pays for the download and request fees.
+ Payer *string `type:"string" enum:"Payer"`
+}
+
+// String returns the string representation
+func (s GetBucketRequestPaymentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketRequestPaymentOutput) GoString() string {
+ return s.String()
+}
+
+// SetPayer sets the Payer field's value.
+func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput {
+ s.Payer = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest
+type GetBucketTaggingInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput
+type GetBucketTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // TagSet is a required field
+ TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetTagSet sets the TagSet field's value.
+func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput {
+ s.TagSet = v
+ return s
+}
+
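+// exampleBucketTags is an editorial sketch (not generated API code) that
+// flattens the returned TagSet into a plain map for easier lookup.
+func exampleBucketTags(svc *S3, bucket string) (map[string]string, error) {
+ out, err := svc.GetBucketTagging((&GetBucketTaggingInput{}).SetBucket(bucket))
+ if err != nil {
+ return nil, err
+ }
+ tags := make(map[string]string, len(out.TagSet))
+ for _, t := range out.TagSet {
+ if t.Key != nil && t.Value != nil {
+ tags[*t.Key] = *t.Value
+ }
+ }
+ return tags, nil
+}
+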
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest
+type GetBucketVersioningInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketVersioningInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketVersioningInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketVersioningInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput
+type GetBucketVersioningOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+ // This element is returned only if the bucket has been configured with MFA
+ // delete; if the bucket has never been so configured, the element is absent.
+ MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"`
+
+ // The versioning state of the bucket.
+ Status *string `type:"string" enum:"BucketVersioningStatus"`
+}
+
+// String returns the string representation
+func (s GetBucketVersioningOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketVersioningOutput) GoString() string {
+ return s.String()
+}
+
+// SetMFADelete sets the MFADelete field's value.
+func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput {
+ s.MFADelete = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput {
+ s.Status = &v
+ return s
+}
+
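+// exampleVersioningEnabled is an editorial sketch (not generated API code)
+// showing the Status check against the generated BucketVersioningStatus
+// constant; Status is nil for buckets that have never been versioned.
+func exampleVersioningEnabled(svc *S3, bucket string) (bool, error) {
+ out, err := svc.GetBucketVersioning((&GetBucketVersioningInput{}).SetBucket(bucket))
+ if err != nil {
+ return false, err
+ }
+ return out.Status != nil && *out.Status == BucketVersioningStatusEnabled, nil
+}
+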
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest
+type GetBucketWebsiteInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketWebsiteInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketWebsiteInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketWebsiteInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput
+type GetBucketWebsiteOutput struct {
+ _ struct{} `type:"structure"`
+
+ ErrorDocument *ErrorDocument `type:"structure"`
+
+ IndexDocument *IndexDocument `type:"structure"`
+
+ RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+
+ RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+}
+
+// String returns the string representation
+func (s GetBucketWebsiteOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketWebsiteOutput) GoString() string {
+ return s.String()
+}
+
+// SetErrorDocument sets the ErrorDocument field's value.
+func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput {
+ s.ErrorDocument = v
+ return s
+}
+
+// SetIndexDocument sets the IndexDocument field's value.
+func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput {
+ s.IndexDocument = v
+ return s
+}
+
+// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value.
+func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput {
+ s.RedirectAllRequestsTo = v
+ return s
+}
+
+// SetRoutingRules sets the RoutingRules field's value.
+func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput {
+ s.RoutingRules = v
+ return s
+}
+
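+// exampleWebsiteDocuments is an editorial sketch (not generated API code)
+// that reads the index and error documents from a website configuration,
+// tolerating either being unset.
+func exampleWebsiteDocuments(svc *S3, bucket string) (index, errDoc string, err error) {
+ out, err := svc.GetBucketWebsite((&GetBucketWebsiteInput{}).SetBucket(bucket))
+ if err != nil {
+ return "", "", err
+ }
+ if out.IndexDocument != nil && out.IndexDocument.Suffix != nil {
+ index = *out.IndexDocument.Suffix
+ }
+ if out.ErrorDocument != nil && out.ErrorDocument.Key != nil {
+ errDoc = *out.ErrorDocument.Key
+ }
+ return index, errDoc, nil
+}
+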
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest
+type GetObjectAclInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectAclInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput
+type GetObjectAclOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of grants.
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+ Owner *Owner `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s GetObjectAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectAclOutput) GoString() string {
+ return s.String()
+}
+
+// SetGrants sets the Grants field's value.
+func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput {
+ s.Grants = v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput {
+ s.Owner = v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput {
+ s.RequestCharged = &v
+ return s
+}
+
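+// Illustrative sketch, not part of the generated file: reading an object's
+// ACL with the fluent setters above. A configured *S3 client ("svc") and the
+// aws/fmt imports are assumptions; bucket and key are placeholders.
+//
+//  out, err := svc.GetObjectAcl(new(s3.GetObjectAclInput).
+//      SetBucket("example-bucket").
+//      SetKey("example-key"))
+//  if err != nil {
+//      return err
+//  }
+//  for _, g := range out.Grants {
+//      if g.Grantee != nil {
+//          fmt.Println(aws.StringValue(g.Grantee.ID), aws.StringValue(g.Permission))
+//      }
+//  }
+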
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest
+type GetObjectInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Return the object only if its entity tag (ETag) is the same as the one specified,
+ // otherwise return a 412 (precondition failed).
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+ // Return the object only if it has been modified since the specified time,
+ // otherwise return a 304 (not modified).
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the object being read. This is a positive integer between
+ // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified.
+ // Useful for downloading just a part of an object.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+ // Downloads the specified range bytes of an object. For more information about
+ // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Sets the Cache-Control header of the response.
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+ // Sets the Content-Disposition header of the response
+ ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+ // Sets the Content-Encoding header of the response.
+ ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+ // Sets the Content-Language header of the response.
+ ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+ // Sets the Content-Type header of the response.
+ ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+ // Sets the Expires header of the response.
+ ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectInput) SetBucket(v string) *GetObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetIfMatch sets the IfMatch field's value.
+func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput {
+ s.IfMatch = &v
+ return s
+}
+
+// SetIfModifiedSince sets the IfModifiedSince field's value.
+func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput {
+ s.IfModifiedSince = &v
+ return s
+}
+
+// SetIfNoneMatch sets the IfNoneMatch field's value.
+func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput {
+ s.IfNoneMatch = &v
+ return s
+}
+
+// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.
+func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput {
+ s.IfUnmodifiedSince = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectInput) SetKey(v string) *GetObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRange sets the Range field's value.
+func (s *GetObjectInput) SetRange(v string) *GetObjectInput {
+ s.Range = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetResponseCacheControl sets the ResponseCacheControl field's value.
+func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput {
+ s.ResponseCacheControl = &v
+ return s
+}
+
+// SetResponseContentDisposition sets the ResponseContentDisposition field's value.
+func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput {
+ s.ResponseContentDisposition = &v
+ return s
+}
+
+// SetResponseContentEncoding sets the ResponseContentEncoding field's value.
+func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput {
+ s.ResponseContentEncoding = &v
+ return s
+}
+
+// SetResponseContentLanguage sets the ResponseContentLanguage field's value.
+func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput {
+ s.ResponseContentLanguage = &v
+ return s
+}
+
+// SetResponseContentType sets the ResponseContentType field's value.
+func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput {
+ s.ResponseContentType = &v
+ return s
+}
+
+// SetResponseExpires sets the ResponseExpires field's value.
+func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput {
+ s.ResponseExpires = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput {
+ s.VersionId = &v
+ return s
+}
+
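+// Illustrative sketch, not part of the generated file: the fluent setters
+// compose a conditional, ranged GET. Bucket, key, and the cached ETag value
+// are placeholders for the example.
+//
+//  in := new(s3.GetObjectInput).
+//      SetBucket("example-bucket").
+//      SetKey("example-key").
+//      SetRange("bytes=0-1023").
+//      SetIfNoneMatch(`"cached-etag"`)
+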
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput
+type GetObjectOutput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+ // Object data.
+ Body io.ReadCloser `type:"blob"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // The portion of the object returned in the response.
+ ContentRange *string `location:"header" locationName:"Content-Range" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // An ETag is an opaque identifier assigned by a web server to a specific version
+ // of a resource found at a URL.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key value pairs
+ // providing object expiration information. The value of the rule-id is URL
+ // encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+ // Last modified date of the object.
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP,
+ // you can create metadata whose values are not legal HTTP headers.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+ // The count of parts this object has.
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`
+
+ ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // Provides information about the object restoration operation and the expiration
+ // time of the restored object copy.
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The number of tags, if any, on the object.
+ TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"`
+
+ // Version of the object.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetAcceptRanges sets the AcceptRanges field's value.
+func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput {
+ s.AcceptRanges = &v
+ return s
+}
+
+// SetBody sets the Body field's value.
+func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput {
+ s.Body = v
+ return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetContentRange sets the ContentRange field's value.
+func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput {
+ s.ContentRange = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput {
+ s.ContentType = &v
+ return s
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput {
+ s.Expires = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput {
+ s.LastModified = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput {
+ s.Metadata = v
+ return s
+}
+
+// SetMissingMeta sets the MissingMeta field's value.
+func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput {
+ s.MissingMeta = &v
+ return s
+}
+
+// SetPartsCount sets the PartsCount field's value.
+func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput {
+ s.PartsCount = &v
+ return s
+}
+
+// SetReplicationStatus sets the ReplicationStatus field's value.
+func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput {
+ s.ReplicationStatus = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetRestore sets the Restore field's value.
+func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput {
+ s.Restore = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetTagCount sets the TagCount field's value.
+func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput {
+ s.TagCount = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
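+// Illustrative sketch, not part of the generated file: Body streams the
+// object data as an io.ReadCloser, so the caller must drain and close it. A
+// configured *S3 client ("svc") and the aws/ioutil imports are assumptions.
+//
+//  out, err := svc.GetObject(&s3.GetObjectInput{
+//      Bucket: aws.String("example-bucket"),
+//      Key:    aws.String("example-key"),
+//  })
+//  if err != nil {
+//      return err
+//  }
+//  defer out.Body.Close()
+//  data, err := ioutil.ReadAll(out.Body)
+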
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest
+type GetObjectTaggingInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput
+type GetObjectTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // TagSet is a required field
+ TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetTagSet sets the TagSet field's value.
+func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput {
+ s.TagSet = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput {
+ s.VersionId = &v
+ return s
+}
+
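+// Illustrative sketch, not part of the generated file: listing an object's
+// tags. A configured *S3 client ("svc") is assumed; bucket and key are
+// placeholders.
+//
+//  out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
+//      Bucket: aws.String("example-bucket"),
+//      Key:    aws.String("example-key"),
+//  })
+//  if err != nil {
+//      return err
+//  }
+//  for _, t := range out.TagSet {
+//      fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
+//  }
+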
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest
+type GetObjectTorrentInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation
+func (s GetObjectTorrentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectTorrentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectTorrentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput
+type GetObjectTorrentOutput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ Body io.ReadCloser `type:"blob"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s GetObjectTorrentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectTorrentOutput) GoString() string {
+ return s.String()
+}
+
+// SetBody sets the Body field's value.
+func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput {
+ s.Body = v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput {
+ s.RequestCharged = &v
+ return s
+}
+
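+// Illustrative sketch, not part of the generated file: as with GetObject,
+// the torrent payload is an io.ReadCloser that must be closed. Client,
+// bucket, and key are assumptions for the example.
+//
+//  out, err := svc.GetObjectTorrent(&s3.GetObjectTorrentInput{
+//      Bucket: aws.String("example-bucket"),
+//      Key:    aws.String("example-key"),
+//  })
+//  if err != nil {
+//      return err
+//  }
+//  defer out.Body.Close()
+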
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters
+type GlacierJobParameters struct {
+ _ struct{} `type:"structure"`
+
+ // Glacier retrieval tier at which the restore will be processed.
+ //
+ // Tier is a required field
+ Tier *string `type:"string" required:"true" enum:"Tier"`
+}
+
+// String returns the string representation
+func (s GlacierJobParameters) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlacierJobParameters) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GlacierJobParameters) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"}
+ if s.Tier == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tier"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetTier sets the Tier field's value.
+func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters {
+ s.Tier = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant
+type Grant struct {
+ _ struct{} `type:"structure"`
+
+ Grantee *Grantee `type:"structure"`
+
+ // Specifies the permission given to the grantee.
+ Permission *string `type:"string" enum:"Permission"`
+}
+
+// String returns the string representation
+func (s Grant) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Grant) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Grant) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Grant"}
+ if s.Grantee != nil {
+ if err := s.Grantee.Validate(); err != nil {
+ invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGrantee sets the Grantee field's value.
+func (s *Grant) SetGrantee(v *Grantee) *Grant {
+ s.Grantee = v
+ return s
+}
+
+// SetPermission sets the Permission field's value.
+func (s *Grant) SetPermission(v string) *Grant {
+ s.Permission = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee
+type Grantee struct {
+ _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
+
+ // Screen name of the grantee.
+ DisplayName *string `type:"string"`
+
+ // Email address of the grantee.
+ EmailAddress *string `type:"string"`
+
+ // The canonical user ID of the grantee.
+ ID *string `type:"string"`
+
+ // Type of grantee.
+ //
+ // Type is a required field
+ Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"`
+
+ // URI of the grantee group.
+ URI *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Grantee) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Grantee) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Grantee) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Grantee"}
+ if s.Type == nil {
+ invalidParams.Add(request.NewErrParamRequired("Type"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *Grantee) SetDisplayName(v string) *Grantee {
+ s.DisplayName = &v
+ return s
+}
+
+// SetEmailAddress sets the EmailAddress field's value.
+func (s *Grantee) SetEmailAddress(v string) *Grantee {
+ s.EmailAddress = &v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Grantee) SetID(v string) *Grantee {
+ s.ID = &v
+ return s
+}
+
+// SetType sets the Type field's value.
+func (s *Grantee) SetType(v string) *Grantee {
+ s.Type = &v
+ return s
+}
+
+// SetURI sets the URI field's value.
+func (s *Grantee) SetURI(v string) *Grantee {
+ s.URI = &v
+ return s
+}
+
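+// Illustrative sketch, not part of the generated file: composing a Grant for
+// a canonical user. The Type and Permission enum constants are defined
+// elsewhere in this package; the user ID is a placeholder.
+//
+//  grant := new(s3.Grant).
+//      SetGrantee(new(s3.Grantee).
+//          SetType(s3.TypeCanonicalUser).
+//          SetID("example-canonical-user-id")).
+//      SetPermission(s3.PermissionRead)
+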
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest
+type HeadBucketInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s HeadBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput
+type HeadBucketOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s HeadBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadBucketOutput) GoString() string {
+ return s.String()
+}
+
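+// Illustrative sketch, not part of the generated file: HeadBucket is
+// commonly used as an existence/permission probe, since a missing bucket
+// surfaces as an awserr.Error (a bare 404 is reported with code "NotFound"
+// in this SDK, to the best of our understanding). Client and bucket name
+// are assumptions.
+//
+//  _, err := svc.HeadBucket(&s3.HeadBucketInput{
+//      Bucket: aws.String("example-bucket"),
+//  })
+//  if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+//      // bucket does not exist or is not visible to the caller
+//  }
+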
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest
+type HeadObjectInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Return the object only if its entity tag (ETag) is the same as the one specified,
+ // otherwise return a 412 (precondition failed).
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+ // Return the object only if it has been modified since the specified time,
+ // otherwise return a 304 (not modified).
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the object being read. This is a positive integer between
+ // 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified.
+ // Useful for querying the size of the part and the number of parts in this
+ // object.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+ // Downloads the specified range bytes of an object. For more information about
+ // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s HeadObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetIfMatch sets the IfMatch field's value.
+func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput {
+ s.IfMatch = &v
+ return s
+}
+
+// SetIfModifiedSince sets the IfModifiedSince field's value.
+func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput {
+ s.IfModifiedSince = &v
+ return s
+}
+
+// SetIfNoneMatch sets the IfNoneMatch field's value.
+func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput {
+ s.IfNoneMatch = &v
+ return s
+}
+
+// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.
+func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput {
+ s.IfUnmodifiedSince = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRange sets the Range field's value.
+func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput {
+ s.Range = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput
+type HeadObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // An ETag is an opaque identifier assigned by a web server to a specific version
+ // of a resource found at a URL.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key value pairs
+ // providing object expiration information. The value of the rule-id is URL
+ // encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+ // Last modified date of the object.
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP,
+ // you can create metadata whose values are not legal HTTP headers.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+ // The count of parts this object has.
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`
+
+ ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // Provides information about the object restoration operation and the expiration
+ // time of the restored object copy.
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // Version of the object.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s HeadObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetAcceptRanges sets the AcceptRanges field's value.
+func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput {
+ s.AcceptRanges = &v
+ return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput {
+ s.ContentType = &v
+ return s
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput {
+ s.Expires = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput {
+ s.LastModified = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput {
+ s.Metadata = v
+ return s
+}
+
+// SetMissingMeta sets the MissingMeta field's value.
+func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput {
+ s.MissingMeta = &v
+ return s
+}
+
+// SetPartsCount sets the PartsCount field's value.
+func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput {
+ s.PartsCount = &v
+ return s
+}
+
+// SetReplicationStatus sets the ReplicationStatus field's value.
+func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput {
+ s.ReplicationStatus = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetRestore sets the Restore field's value.
+func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput {
+ s.Restore = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
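+// Illustrative sketch, not part of the generated file: HeadObject returns
+// the same metadata headers as GetObject without the body, making it a cheap
+// way to read size and ETag. Client, bucket, and key are assumptions.
+//
+//  out, err := svc.HeadObject(&s3.HeadObjectInput{
+//      Bucket: aws.String("example-bucket"),
+//      Key:    aws.String("example-key"),
+//  })
+//  if err != nil {
+//      return err
+//  }
+//  size := aws.Int64Value(out.ContentLength)
+//  etag := aws.StringValue(out.ETag)
+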
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument
+type IndexDocument struct {
+ _ struct{} `type:"structure"`
+
+ // A suffix that is appended to a request that is for a directory on the website
+ // endpoint (e.g., if the suffix is index.html and you make a request to samplebucket/images/,
+ // the data that is returned will be for the object with the key name images/index.html).
+ // The suffix must not be empty and must not include a slash character.
+ //
+ // Suffix is a required field
+ Suffix *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s IndexDocument) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IndexDocument) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *IndexDocument) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "IndexDocument"}
+ if s.Suffix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Suffix"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetSuffix sets the Suffix field's value.
+func (s *IndexDocument) SetSuffix(v string) *IndexDocument {
+ s.Suffix = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator
+type Initiator struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the Principal.
+ DisplayName *string `type:"string"`
+
+ // If the principal is an AWS account, it provides the Canonical User ID. If
+ // the principal is an IAM User, it provides a user ARN value.
+ ID *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Initiator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Initiator) GoString() string {
+ return s.String()
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *Initiator) SetDisplayName(v string) *Initiator {
+ s.DisplayName = &v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Initiator) SetID(v string) *Initiator {
+ s.ID = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration
+type InventoryConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Contains information about where to publish the inventory results.
+ //
+ // Destination is a required field
+ Destination *InventoryDestination `type:"structure" required:"true"`
+
+ // Specifies an inventory filter. The inventory only includes objects that meet
+ // the filter's criteria.
+ Filter *InventoryFilter `type:"structure"`
+
+ // The ID used to identify the inventory configuration.
+ //
+ // Id is a required field
+ Id *string `type:"string" required:"true"`
+
+ // Specifies which object version(s) to include in the inventory results.
+ //
+ // IncludedObjectVersions is a required field
+ IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"`
+
+ // Specifies whether the inventory is enabled or disabled.
+ //
+ // IsEnabled is a required field
+ IsEnabled *bool `type:"boolean" required:"true"`
+
+ // Contains the optional fields that are included in the inventory results.
+ OptionalFields []*string `locationNameList:"Field" type:"list"`
+
+ // Specifies the schedule for generating inventory results.
+ //
+ // Schedule is a required field
+ Schedule *InventorySchedule `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s InventoryConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"}
+ if s.Destination == nil {
+ invalidParams.Add(request.NewErrParamRequired("Destination"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.IncludedObjectVersions == nil {
+ invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions"))
+ }
+ if s.IsEnabled == nil {
+ invalidParams.Add(request.NewErrParamRequired("IsEnabled"))
+ }
+ if s.Schedule == nil {
+ invalidParams.Add(request.NewErrParamRequired("Schedule"))
+ }
+ if s.Destination != nil {
+ if err := s.Destination.Validate(); err != nil {
+ invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Filter != nil {
+ if err := s.Filter.Validate(); err != nil {
+ invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Schedule != nil {
+ if err := s.Schedule.Validate(); err != nil {
+ invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDestination sets the Destination field's value.
+func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration {
+ s.Destination = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetIncludedObjectVersions sets the IncludedObjectVersions field's value.
+func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration {
+ s.IncludedObjectVersions = &v
+ return s
+}
+
+// SetIsEnabled sets the IsEnabled field's value.
+func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration {
+ s.IsEnabled = &v
+ return s
+}
+
+// SetOptionalFields sets the OptionalFields field's value.
+func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration {
+ s.OptionalFields = v
+ return s
+}
+
+// SetSchedule sets the Schedule field's value.
+func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration {
+ s.Schedule = v
+ return s
+}
+
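+// Illustrative sketch, not part of the generated file: a minimal
+// configuration that satisfies Validate above. The enum constants are
+// defined elsewhere in this package; the ID and bucket ARN are placeholders.
+//
+//  cfg := &s3.InventoryConfiguration{
+//      Id:                     aws.String("example-weekly-inventory"),
+//      IsEnabled:              aws.Bool(true),
+//      IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
+//      Schedule: &s3.InventorySchedule{
+//          Frequency: aws.String(s3.InventoryFrequencyWeekly),
+//      },
+//      Destination: &s3.InventoryDestination{
+//          S3BucketDestination: &s3.InventoryS3BucketDestination{
+//              Bucket: aws.String("arn:aws:s3:::example-inventory-results"),
+//              Format: aws.String(s3.InventoryFormatCsv),
+//          },
+//      },
+//  }
+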
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination
+type InventoryDestination struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the bucket name, file format, bucket owner (optional), and prefix
+ // (optional) where inventory results are published.
+ //
+ // S3BucketDestination is a required field
+ S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s InventoryDestination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryDestination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryDestination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"}
+ if s.S3BucketDestination == nil {
+ invalidParams.Add(request.NewErrParamRequired("S3BucketDestination"))
+ }
+ if s.S3BucketDestination != nil {
+ if err := s.S3BucketDestination.Validate(); err != nil {
+ invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetS3BucketDestination sets the S3BucketDestination field's value.
+func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination {
+ s.S3BucketDestination = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter
+type InventoryFilter struct {
+ _ struct{} `type:"structure"`
+
+ // The prefix that an object must have to be included in the inventory results.
+ //
+ // Prefix is a required field
+ Prefix *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s InventoryFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"}
+ if s.Prefix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Prefix"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter {
+ s.Prefix = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination
+type InventoryS3BucketDestination struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the account that owns the destination bucket.
+ AccountId *string `type:"string"`
+
+ // The Amazon Resource Name (ARN) of the bucket where inventory results will
+ // be published.
+ //
+ // Bucket is a required field
+ Bucket *string `type:"string" required:"true"`
+
+ // Specifies the output format of the inventory results.
+ //
+ // Format is a required field
+ Format *string `type:"string" required:"true" enum:"InventoryFormat"`
+
+ // The prefix that is prepended to all inventory results.
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InventoryS3BucketDestination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryS3BucketDestination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryS3BucketDestination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Format == nil {
+ invalidParams.Add(request.NewErrParamRequired("Format"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccountId sets the AccountId field's value.
+func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination {
+ s.AccountId = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination {
+ s.Bucket = &v
+ return s
+}
+
+// SetFormat sets the Format field's value.
+func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination {
+ s.Format = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination {
+ s.Prefix = &v
+ return s
+}
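+
+// exampleInventoryDestination is a hand-written usage sketch, not part of the
+// generated API: it shows how the fluent setters above compose an
+// InventoryDestination and how Validate reports missing required fields. The
+// bucket ARN is a made-up placeholder, and "CSV" is assumed to be a valid
+// InventoryFormat value.
+func exampleInventoryDestination() (*InventoryDestination, error) {
+ s3Dest := &InventoryS3BucketDestination{}
+ s3Dest.SetBucket("arn:aws:s3:::example-inventory-bucket") // hypothetical ARN
+ s3Dest.SetFormat("CSV")
+ s3Dest.SetPrefix("inventory/")
+
+ dest := &InventoryDestination{}
+ dest.SetS3BucketDestination(s3Dest)
+ if err := dest.Validate(); err != nil {
+ return nil, err
+ }
+ return dest, nil
+}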
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule
+type InventorySchedule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies how frequently inventory results are produced.
+ //
+ // Frequency is a required field
+ Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"`
+}
+
+// String returns the string representation
+func (s InventorySchedule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventorySchedule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventorySchedule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"}
+ if s.Frequency == nil {
+ invalidParams.Add(request.NewErrParamRequired("Frequency"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFrequency sets the Frequency field's value.
+func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule {
+ s.Frequency = &v
+ return s
+}
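+
+// exampleInventorySchedule is a hand-written sketch, not generated code:
+// "Daily" (like "Weekly") is assumed to be one of the InventoryFrequency
+// values. A complete InventoryConfiguration would also need a Destination and
+// its other required fields; only the setters shown above are used here.
+func exampleInventorySchedule() (*InventoryConfiguration, error) {
+ sched := &InventorySchedule{}
+ sched.SetFrequency("Daily")
+ if err := sched.Validate(); err != nil {
+ return nil, err
+ }
+ cfg := &InventoryConfiguration{}
+ cfg.SetSchedule(sched) // SetSchedule is defined earlier in this file
+ return cfg, nil
+}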
+
+// Container for object key name prefix and suffix filtering rules.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter
+type KeyFilter struct {
+ _ struct{} `type:"structure"`
+
+ // A list of containers for key-value pairs that define the criteria for
+ // the filter rule.
+ FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s KeyFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s KeyFilter) GoString() string {
+ return s.String()
+}
+
+// SetFilterRules sets the FilterRules field's value.
+func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter {
+ s.FilterRules = v
+ return s
+}
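+
+// exampleKeyFilter is a hand-written sketch, not generated code: it builds a
+// KeyFilter whose single rule matches keys beginning with "logs/". FilterRule
+// is defined elsewhere in this file; "prefix" (and "suffix") are the rule
+// names S3 documents for key filtering, and the values here are placeholders.
+func exampleKeyFilter() *KeyFilter {
+ name, value := "prefix", "logs/"
+ kf := &KeyFilter{}
+ kf.SetFilterRules([]*FilterRule{{Name: &name, Value: &value}})
+ return kf
+}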
+
+// Container for specifying the AWS Lambda notification configuration.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration
+type LambdaFunctionConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Events is a required field
+ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+ // Container for object key name filtering rules. For information about key
+ // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ Filter *NotificationConfigurationFilter `type:"structure"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ // Lambda cloud function ARN that Amazon S3 can invoke when it detects events
+ // of the specified type.
+ //
+ // LambdaFunctionArn is a required field
+ LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s LambdaFunctionConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LambdaFunctionConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LambdaFunctionConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"}
+ if s.Events == nil {
+ invalidParams.Add(request.NewErrParamRequired("Events"))
+ }
+ if s.LambdaFunctionArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEvents sets the Events field's value.
+func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration {
+ s.Events = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetLambdaFunctionArn sets the LambdaFunctionArn field's value.
+func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration {
+ s.LambdaFunctionArn = &v
+ return s
+}
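+
+// exampleLambdaFunctionConfiguration is a hand-written sketch, not generated
+// code: it builds a notification entry that invokes a Lambda function when
+// objects are created. The ARN is a made-up placeholder and
+// "s3:ObjectCreated:*" is one of the event names S3 documents.
+func exampleLambdaFunctionConfiguration() (*LambdaFunctionConfiguration, error) {
+ event := "s3:ObjectCreated:*"
+ cfg := &LambdaFunctionConfiguration{}
+ cfg.SetEvents([]*string{&event})
+ cfg.SetLambdaFunctionArn("arn:aws:lambda:us-east-1:123456789012:function:example")
+ if err := cfg.Validate(); err != nil {
+ return nil, err
+ }
+ return cfg, nil
+}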
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration
+type LifecycleConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Rules is a required field
+ Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s LifecycleConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"}
+ if s.Rules == nil {
+ invalidParams.Add(request.NewErrParamRequired("Rules"))
+ }
+ if s.Rules != nil {
+ for i, v := range s.Rules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRules sets the Rules field's value.
+func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration {
+ s.Rules = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration
+type LifecycleExpiration struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates the date on which the object is to be moved or deleted. The
+ // date should be in ISO 8601 format, in GMT.
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Indicates the lifetime, in days, of the objects that are subject to the rule.
+ // The value must be a non-zero positive integer.
+ Days *int64 `type:"integer"`
+
+ // Indicates whether Amazon S3 will remove a delete marker with no noncurrent
+ // versions. If set to true, the delete marker will be expired; if set to
+ // false, the policy takes no action. This cannot be specified together with
+ // Days or Date in a Lifecycle Expiration Policy.
+ ExpiredObjectDeleteMarker *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s LifecycleExpiration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleExpiration) GoString() string {
+ return s.String()
+}
+
+// SetDate sets the Date field's value.
+func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration {
+ s.Date = &v
+ return s
+}
+
+// SetDays sets the Days field's value.
+func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration {
+ s.Days = &v
+ return s
+}
+
+// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value.
+func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration {
+ s.ExpiredObjectDeleteMarker = &v
+ return s
+}
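+
+// exampleLifecycleExpiration is a hand-written sketch, not generated code: it
+// expires objects 30 days after creation. Per the doc comment above,
+// ExpiredObjectDeleteMarker must not be combined with Days or Date, so only
+// Days is set here.
+func exampleLifecycleExpiration() *LifecycleExpiration {
+ exp := &LifecycleExpiration{}
+ exp.SetDays(30)
+ return exp
+}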
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule
+type LifecycleRule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of days after the initiation of an incomplete multipart
+ // upload that Lifecycle will wait before permanently removing all parts of
+ // the upload.
+ AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+
+ Expiration *LifecycleExpiration `type:"structure"`
+
+ // The Filter is used to identify objects that a Lifecycle Rule applies to.
+ // A Filter must have exactly one of Prefix, Tag, or And specified.
+ Filter *LifecycleRuleFilter `type:"structure"`
+
+ // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string `type:"string"`
+
+ // Specifies when noncurrent object versions expire. Upon expiration, Amazon
+ // S3 permanently deletes the noncurrent object versions. You set this lifecycle
+ // configuration action on a bucket that has versioning enabled (or suspended)
+ // to request that Amazon S3 delete noncurrent object versions at a specific
+ // period in the object's lifetime.
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+
+ NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`
+
+ // Prefix identifying one or more objects to which the rule applies. This is
+ // deprecated; use Filter instead.
+ Prefix *string `deprecated:"true" type:"string"`
+
+ // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
+ // is not currently being applied.
+ //
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
+
+ Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s LifecycleRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"}
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+ if s.Filter != nil {
+ if err := s.Filter.Validate(); err != nil {
+ invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value.
+func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule {
+ s.AbortIncompleteMultipartUpload = v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule {
+ s.Expiration = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule {
+ s.Filter = v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *LifecycleRule) SetID(v string) *LifecycleRule {
+ s.ID = &v
+ return s
+}
+
+// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
+func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule {
+ s.NoncurrentVersionExpiration = v
+ return s
+}
+
+// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value.
+func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule {
+ s.NoncurrentVersionTransitions = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule {
+ s.Prefix = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *LifecycleRule) SetStatus(v string) *LifecycleRule {
+ s.Status = &v
+ return s
+}
+
+// SetTransitions sets the Transitions field's value.
+func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule {
+ s.Transitions = v
+ return s
+}
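+
+// exampleLifecycleRule is a hand-written sketch, not generated code: it
+// combines the pieces above into a rule that expires objects under a prefix
+// after 30 days. "Enabled" is assumed to be one of the ExpirationStatus
+// values; the prefix and ID are placeholders.
+func exampleLifecycleRule() (*LifecycleRule, error) {
+ filter := &LifecycleRuleFilter{} // defined below in this file
+ filter.SetPrefix("tmp/")
+
+ exp := &LifecycleExpiration{}
+ exp.SetDays(30)
+
+ rule := &LifecycleRule{}
+ rule.SetID("expire-tmp-after-30-days")
+ rule.SetStatus("Enabled")
+ rule.SetFilter(filter)
+ rule.SetExpiration(exp)
+ if err := rule.Validate(); err != nil {
+ return nil, err
+ }
+ return rule, nil
+}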
+
+// This is used in a Lifecycle Rule Filter to apply a logical AND to two or
+// more predicates. The Lifecycle Rule will apply to any object matching all
+// of the predicates configured inside the And operator.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator
+type LifecycleRuleAndOperator struct {
+ _ struct{} `type:"structure"`
+
+ Prefix *string `type:"string"`
+
+ // All of these tags must exist in the object's tag set in order for the rule
+ // to apply.
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s LifecycleRuleAndOperator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleRuleAndOperator) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleRuleAndOperator) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"}
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator {
+ s.Prefix = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator {
+ s.Tags = v
+ return s
+}
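+
+// exampleLifecycleRuleAndOperator is a hand-written sketch, not generated
+// code: an And operator that matches only objects carrying both the given
+// prefix and the given tag. Tag is defined elsewhere in this file; the key
+// and value are placeholders.
+func exampleLifecycleRuleAndOperator() (*LifecycleRuleAndOperator, error) {
+ key, value := "project", "alpha"
+ and := &LifecycleRuleAndOperator{}
+ and.SetPrefix("reports/")
+ and.SetTags([]*Tag{{Key: &key, Value: &value}})
+ if err := and.Validate(); err != nil {
+ return nil, err
+ }
+ return and, nil
+}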
+
+// The Filter is used to identify objects that a Lifecycle Rule applies to.
+// A Filter must have exactly one of Prefix, Tag, or And specified.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter
+type LifecycleRuleFilter struct {
+ _ struct{} `type:"structure"`
+
+ // This is used in a Lifecycle Rule Filter to apply a logical AND to two or
+ // more predicates. The Lifecycle Rule will apply to any object matching all
+ // of the predicates configured inside the And operator.
+ And *LifecycleRuleAndOperator `type:"structure"`
+
+ // Prefix identifying one or more objects to which the rule applies.
+ Prefix *string `type:"string"`
+
+ // This tag must exist in the object's tag set in order for the rule to apply.
+ Tag *Tag `type:"structure"`
+}
+
+// String returns the string representation
+func (s LifecycleRuleFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleRuleFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleRuleFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"}
+ if s.And != nil {
+ if err := s.And.Validate(); err != nil {
+ invalidParams.AddNested("And", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Tag != nil {
+ if err := s.Tag.Validate(); err != nil {
+ invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAnd sets the And field's value.
+func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter {
+ s.And = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter {
+ s.Prefix = &v
+ return s
+}
+
+// SetTag sets the Tag field's value.
+func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter {
+ s.Tag = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest
+type ListBucketAnalyticsConfigurationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket from which analytics configurations are retrieved.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ContinuationToken that represents a placeholder from where this request
+ // should begin.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketAnalyticsConfigurationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketAnalyticsConfigurationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListBucketAnalyticsConfigurationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput {
+ s.ContinuationToken = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput
+type ListBucketAnalyticsConfigurationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of analytics configurations for a bucket.
+ AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"`
+
+ // The ContinuationToken that represents where this request began.
+ ContinuationToken *string `type:"string"`
+
+ // Indicates whether the returned list of analytics configurations is complete.
+ // A value of true indicates that the list is not complete and the NextContinuationToken
+ // will be provided for a subsequent request.
+ IsTruncated *bool `type:"boolean"`
+
+ // NextContinuationToken is sent when IsTruncated is true, which indicates that
+ // there are more analytics configurations to list. The next request must include
+ // this NextContinuationToken. The token is obfuscated and is not a usable value.
+ NextContinuationToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketAnalyticsConfigurationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketAnalyticsConfigurationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value.
+func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput {
+ s.AnalyticsConfigurationList = v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput {
+ s.NextContinuationToken = &v
+ return s
+}
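+
+// exampleListAllAnalyticsConfigurations is a hand-written sketch of the
+// continuation-token protocol described above, not generated code: it keeps
+// calling the operation while IsTruncated is true, feeding each
+// NextContinuationToken back into the request. It assumes an *S3 client from
+// this package and a caller-supplied bucket name.
+func exampleListAllAnalyticsConfigurations(svc *S3, bucket string) ([]*AnalyticsConfiguration, error) {
+ var configs []*AnalyticsConfiguration
+ input := &ListBucketAnalyticsConfigurationsInput{}
+ input.SetBucket(bucket)
+ for {
+ out, err := svc.ListBucketAnalyticsConfigurations(input)
+ if err != nil {
+ return nil, err
+ }
+ configs = append(configs, out.AnalyticsConfigurationList...)
+ if out.IsTruncated == nil || !*out.IsTruncated || out.NextContinuationToken == nil {
+ return configs, nil
+ }
+ input.SetContinuationToken(*out.NextContinuationToken)
+ }
+}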
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest
+type ListBucketInventoryConfigurationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket containing the inventory configurations to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The marker used to continue an inventory configuration listing that has been
+ // truncated. Use the NextContinuationToken from a previously truncated list
+ // response to continue the listing. The continuation token is an opaque value
+ // that Amazon S3 understands.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketInventoryConfigurationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketInventoryConfigurationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListBucketInventoryConfigurationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput {
+ s.ContinuationToken = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput
+type ListBucketInventoryConfigurationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If sent in the request, the marker that is used as a starting point for this
+ // inventory configuration list response.
+ ContinuationToken *string `type:"string"`
+
+ // The list of inventory configurations for a bucket.
+ InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"`
+
+ // Indicates whether the returned list of inventory configurations is truncated
+ // in this response. A value of true indicates that the list is truncated.
+ IsTruncated *bool `type:"boolean"`
+
+ // The marker used to continue this inventory configuration listing. Use the
+ // NextContinuationToken from this response to continue the listing in a subsequent
+ // request. The continuation token is an opaque value that Amazon S3 understands.
+ NextContinuationToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketInventoryConfigurationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketInventoryConfigurationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetInventoryConfigurationList sets the InventoryConfigurationList field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput {
+ s.InventoryConfigurationList = v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput {
+ s.NextContinuationToken = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest
+type ListBucketMetricsConfigurationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket containing the metrics configurations to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The marker that is used to continue a metrics configuration listing that
+ // has been truncated. Use the NextContinuationToken from a previously truncated
+ // list response to continue the listing. The continuation token is an opaque
+ // value that Amazon S3 understands.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketMetricsConfigurationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketMetricsConfigurationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListBucketMetricsConfigurationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput {
+ s.ContinuationToken = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput
+type ListBucketMetricsConfigurationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The marker that is used as a starting point for this metrics configuration
+ // list response. This value is present if it was sent in the request.
+ ContinuationToken *string `type:"string"`
+
+ // Indicates whether the returned list of metrics configurations is complete.
+ // A value of true indicates that the list is not complete and the NextContinuationToken
+ // will be provided for a subsequent request.
+ IsTruncated *bool `type:"boolean"`
+
+ // The list of metrics configurations for a bucket.
+ MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"`
+
+ // The marker used to continue a metrics configuration listing that has been
+ // truncated. Use the NextContinuationToken from a previously truncated list
+ // response to continue the listing. The continuation token is an opaque value
+ // that Amazon S3 understands.
+ NextContinuationToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketMetricsConfigurationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketMetricsConfigurationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetMetricsConfigurationList sets the MetricsConfigurationList field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput {
+ s.MetricsConfigurationList = v
+ return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput {
+ s.NextContinuationToken = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsInput
+type ListBucketsInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListBucketsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketsInput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput
+type ListBucketsOutput struct {
+ _ struct{} `type:"structure"`
+
+ Buckets []*Bucket `locationNameList:"Bucket" type:"list"`
+
+ Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListBucketsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketsOutput) GoString() string {
+ return s.String()
+}
+
+// SetBuckets sets the Buckets field's value.
+func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput {
+ s.Buckets = v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput {
+ s.Owner = v
+ return s
+}
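+
+// exampleListBuckets is a hand-written sketch, not generated code: ListBuckets
+// takes no parameters and returns every bucket the credentials own, plus the
+// Owner. It assumes an *S3 client from this package.
+func exampleListBuckets(svc *S3) error {
+ out, err := svc.ListBuckets(&ListBucketsInput{})
+ if err != nil {
+ return err
+ }
+ for _, b := range out.Buckets {
+ if b.Name != nil {
+ fmt.Println(*b.Name)
+ }
+ }
+ return nil
+}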
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest
+type ListMultipartUploadsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies
+ // the encoding method to use. An object key may contain any Unicode character;
+ // however, an XML 1.0 parser cannot parse some characters, such as characters
+ // with an ASCII value from 0 to 10. For characters that are not supported in
+ // XML 1.0, you can add this parameter to request that Amazon S3 encode the
+ // keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // Together with upload-id-marker, this parameter specifies the multipart upload
+ // after which listing should begin.
+ KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+ // Sets the maximum number of multipart uploads, from 1 to 1,000, to return
+ // in the response body. 1,000 is the maximum number of uploads that can be
+ // returned in a response.
+ MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"`
+
+ // Lists in-progress uploads only for those keys that begin with the specified
+ // prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Together with key-marker, specifies the multipart upload after which listing
+ // should begin. If key-marker is not specified, the upload-id-marker parameter
+ // is ignored.
+ UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"`
+}
+
+// String returns the string representation
+func (s ListMultipartUploadsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListMultipartUploadsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListMultipartUploadsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput {
+ s.KeyMarker = &v
+ return s
+}
+
+// SetMaxUploads sets the MaxUploads field's value.
+func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput {
+ s.MaxUploads = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput {
+ s.Prefix = &v
+ return s
+}
+
+// SetUploadIdMarker sets the UploadIdMarker field's value.
+func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput {
+ s.UploadIdMarker = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput
+type ListMultipartUploadsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `type:"string"`
+
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // Indicates whether the returned list of multipart uploads is truncated. A
+ // value of true indicates that the list was truncated. The list can be truncated
+ // if the number of multipart uploads exceeds the limit allowed or specified
+ // by the max-uploads parameter.
+ IsTruncated *bool `type:"boolean"`
+
+ // The key at or after which the listing began.
+ KeyMarker *string `type:"string"`
+
+ // Maximum number of multipart uploads that could have been included in the
+ // response.
+ MaxUploads *int64 `type:"integer"`
+
+ // When a list is truncated, this element specifies the value that should be
+ // used for the key-marker request parameter in a subsequent request.
+ NextKeyMarker *string `type:"string"`
+
+ // When a list is truncated, this element specifies the value that should be
+ // used for the upload-id-marker request parameter in a subsequent request.
+ NextUploadIdMarker *string `type:"string"`
+
+ // When a prefix is provided in the request, this field contains the specified
+ // prefix. The result contains only keys starting with the specified prefix.
+ Prefix *string `type:"string"`
+
+ // Upload ID after which listing began.
+ UploadIdMarker *string `type:"string"`
+
+ Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s ListMultipartUploadsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListMultipartUploadsOutput) GoString() string {
+ return s.String()
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput {
+ s.KeyMarker = &v
+ return s
+}
+
+// SetMaxUploads sets the MaxUploads field's value.
+func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput {
+ s.MaxUploads = &v
+ return s
+}
+
+// SetNextKeyMarker sets the NextKeyMarker field's value.
+func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput {
+ s.NextKeyMarker = &v
+ return s
+}
+
+// SetNextUploadIdMarker sets the NextUploadIdMarker field's value.
+func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput {
+ s.NextUploadIdMarker = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput {
+ s.Prefix = &v
+ return s
+}
+
+// SetUploadIdMarker sets the UploadIdMarker field's value.
+func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput {
+ s.UploadIdMarker = &v
+ return s
+}
+
+// SetUploads sets the Uploads field's value.
+func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput {
+ s.Uploads = v
+ return s
+}
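+
+// exampleListAllMultipartUploads is a hand-written sketch of the paging
+// scheme described above, not generated code: when IsTruncated is true,
+// NextKeyMarker and NextUploadIdMarker seed the key-marker and
+// upload-id-marker of the next request. It assumes an *S3 client and a
+// caller-supplied bucket name.
+func exampleListAllMultipartUploads(svc *S3, bucket string) ([]*MultipartUpload, error) {
+ var uploads []*MultipartUpload
+ input := &ListMultipartUploadsInput{}
+ input.SetBucket(bucket)
+ for {
+ out, err := svc.ListMultipartUploads(input)
+ if err != nil {
+ return nil, err
+ }
+ uploads = append(uploads, out.Uploads...)
+ if out.IsTruncated == nil || !*out.IsTruncated {
+ return uploads, nil
+ }
+ if out.NextKeyMarker != nil {
+ input.SetKeyMarker(*out.NextKeyMarker)
+ }
+ if out.NextUploadIdMarker != nil {
+ input.SetUploadIdMarker(*out.NextUploadIdMarker)
+ }
+ }
+}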
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest
+type ListObjectVersionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies
+ // the encoding method to use. An object key may contain any Unicode character;
+ // however, an XML 1.0 parser cannot parse some characters, such as characters
+ // with an ASCII value from 0 to 10. For characters that are not supported in
+ // XML 1.0, you can add this parameter to request that Amazon S3 encode the
+ // keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // Specifies the key to start with when listing objects in a bucket.
+ KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Specifies the object version you want to start listing from.
+ VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectVersionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectVersionsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectVersionsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput {
+ s.KeyMarker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput {
+ s.Prefix = &v
+ return s
+}
+
+// SetVersionIdMarker sets the VersionIdMarker field's value.
+func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput {
+ s.VersionIdMarker = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput
+type ListObjectVersionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"`
+
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria. If your results were truncated, you can
+ // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker
+ // response parameters as a starting place in another request to return the
+ // rest of the results.
+ IsTruncated *bool `type:"boolean"`
+
+ // Marks the last Key returned in a truncated response.
+ KeyMarker *string `type:"string"`
+
+ MaxKeys *int64 `type:"integer"`
+
+ Name *string `type:"string"`
+
+ // Use this value for the key marker request parameter in a subsequent request.
+ NextKeyMarker *string `type:"string"`
+
+ // Use this value for the next version id marker parameter in a subsequent request.
+ NextVersionIdMarker *string `type:"string"`
+
+ Prefix *string `type:"string"`
+
+ VersionIdMarker *string `type:"string"`
+
+ Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s ListObjectVersionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectVersionsOutput) GoString() string {
+ return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetDeleteMarkers sets the DeleteMarkers field's value.
+func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput {
+ s.DeleteMarkers = v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput {
+ s.KeyMarker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput {
+ s.Name = &v
+ return s
+}
+
+// SetNextKeyMarker sets the NextKeyMarker field's value.
+func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput {
+ s.NextKeyMarker = &v
+ return s
+}
+
+// SetNextVersionIdMarker sets the NextVersionIdMarker field's value.
+func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput {
+ s.NextVersionIdMarker = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput {
+ s.Prefix = &v
+ return s
+}
+
+// SetVersionIdMarker sets the VersionIdMarker field's value.
+func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput {
+ s.VersionIdMarker = &v
+ return s
+}
+
+// SetVersions sets the Versions field's value.
+func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput {
+ s.Versions = v
+ return s
+}
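+
+// exampleListAllObjectVersions is a hand-written sketch, not generated code:
+// it walks every object version under a prefix, following NextKeyMarker and
+// NextVersionIdMarker exactly as the IsTruncated doc comment above describes.
+// It assumes an *S3 client and caller-supplied bucket and prefix values.
+func exampleListAllObjectVersions(svc *S3, bucket, prefix string) ([]*ObjectVersion, error) {
+ var versions []*ObjectVersion
+ input := &ListObjectVersionsInput{}
+ input.SetBucket(bucket)
+ input.SetPrefix(prefix)
+ for {
+ out, err := svc.ListObjectVersions(input)
+ if err != nil {
+ return nil, err
+ }
+ versions = append(versions, out.Versions...)
+ if out.IsTruncated == nil || !*out.IsTruncated {
+ return versions, nil
+ }
+ if out.NextKeyMarker != nil {
+ input.SetKeyMarker(*out.NextKeyMarker)
+ }
+ if out.NextVersionIdMarker != nil {
+ input.SetVersionIdMarker(*out.NextVersionIdMarker)
+ }
+ }
+}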
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest
+type ListObjectsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies
+ // the encoding method to use. An object key may contain any Unicode character;
+ // however, an XML 1.0 parser cannot parse some characters, such as characters
+ // with an ASCII value from 0 to 10. For characters that are not supported in
+ // XML 1.0, you can add this parameter to request that Amazon S3 encode the
+ // keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // Specifies the key to start with when listing objects in a bucket.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // list objects request. Bucket owners need not specify this parameter in their
+ // requests.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation
+func (s ListObjectsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput {
+ s.Marker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput {
+ s.Prefix = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput
+type ListObjectsOutput struct {
+ _ struct{} `type:"structure"`
+
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ Contents []*Object `type:"list" flattened:"true"`
+
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated *bool `type:"boolean"`
+
+ Marker *string `type:"string"`
+
+ MaxKeys *int64 `type:"integer"`
+
+ Name *string `type:"string"`
+
+ // When the response is truncated (the IsTruncated element value in the response
+ // is true), you can use the key name in this field as the marker in a subsequent
+ // request to get the next set of objects. Amazon S3 lists objects in alphabetical
+ // order. Note: this element is returned only if you specify the delimiter request
+ // parameter. If the response is truncated and does not include NextMarker, you
+ // can use the value of the last Key in the response as the marker in a subsequent
+ // request to get the next set of object keys.
+ NextMarker *string `type:"string"`
+
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsOutput) GoString() string {
+ return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetContents sets the Contents field's value.
+func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput {
+ s.Contents = v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput {
+ s.Marker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput {
+ s.Name = &v
+ return s
+}
+
+// SetNextMarker sets the NextMarker field's value.
+func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput {
+ s.NextMarker = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput {
+ s.Prefix = &v
+ return s
+}
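+
+// exampleListAllObjects is a hand-written sketch of the v1 paging protocol,
+// not generated code. It includes the fallback the NextMarker doc comment
+// above calls out: when a truncated response carries no NextMarker (no
+// delimiter was set), the last returned Key becomes the next Marker. It
+// assumes an *S3 client and a caller-supplied bucket name.
+func exampleListAllObjects(svc *S3, bucket string) ([]*Object, error) {
+ var objects []*Object
+ input := &ListObjectsInput{}
+ input.SetBucket(bucket)
+ for {
+ out, err := svc.ListObjects(input)
+ if err != nil {
+ return nil, err
+ }
+ objects = append(objects, out.Contents...)
+ if out.IsTruncated == nil || !*out.IsTruncated {
+ return objects, nil
+ }
+ switch {
+ case out.NextMarker != nil:
+ input.SetMarker(*out.NextMarker)
+ case len(out.Contents) > 0 && out.Contents[len(out.Contents)-1].Key != nil:
+ input.SetMarker(*out.Contents[len(out.Contents)-1].Key)
+ default:
+ return objects, nil
+ }
+ }
+}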
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request
+type ListObjectsV2Input struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the bucket to list.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // ContinuationToken indicates to Amazon S3 that the list is being continued
+ // on this bucket with a token. ContinuationToken is obfuscated and is not a
+ // real key.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // The owner field is not present in ListObjectsV2 results by default. If you
+ // want the owner field returned with each key in the result, set the fetch-owner
+ // parameter to true.
+ FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // list objects request in V2 style. Bucket owners need not specify this parameter
+ // in their requests.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3
+ // starts listing after the specified key. StartAfter can be any key in the
+ // bucket.
+ StartAfter *string `location:"querystring" locationName:"start-after" type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsV2Input) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsV2Input) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectsV2Input) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input {
+ s.Bucket = &v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input {
+ s.EncodingType = &v
+ return s
+}
+
+// SetFetchOwner sets the FetchOwner field's value.
+func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input {
+ s.FetchOwner = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input {
+ s.Prefix = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetStartAfter sets the StartAfter field's value.
+func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input {
+ s.StartAfter = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output
+type ListObjectsV2Output struct {
+ _ struct{} `type:"structure"`
+
+ // CommonPrefixes contains all (if there are any) keys between Prefix and the
+ // next occurrence of the string specified by Delimiter.
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ // Metadata about each object returned.
+ Contents []*Object `type:"list" flattened:"true"`
+
+ // ContinuationToken indicates to Amazon S3 that the list is being continued
+ // on this bucket with a token. ContinuationToken is obfuscated and is not a
+ // real key.
+ ContinuationToken *string `type:"string"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether Amazon S3 returned all of the results that
+ // satisfied the search criteria.
+ IsTruncated *bool `type:"boolean"`
+
+ // KeyCount is the number of keys returned with this request. KeyCount will
+ // always be less than or equal to the MaxKeys field; for example, if you ask
+ // for 50 keys, your result will include at most 50 keys.
+ KeyCount *int64 `type:"integer"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `type:"integer"`
+
+ // Name of the bucket to list.
+ Name *string `type:"string"`
+
+ // NextContinuationToken is sent when IsTruncated is true, which means there
+ // are more keys in the bucket that can be listed. The next list request to
+ // Amazon S3 can be continued with this NextContinuationToken.
+ // NextContinuationToken is obfuscated and is not a real key.
+ NextContinuationToken *string `type:"string"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `type:"string"`
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3
+ // starts listing after this specified key. StartAfter can be any key in the
+ // bucket.
+ StartAfter *string `type:"string"`
+}
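+
+// Pagination sketch (illustrative only): when IsTruncated is true, feed
+// NextContinuationToken back into the next request's ContinuationToken.
+// "svc" is an assumed S3 client, "my-bucket" a hypothetical bucket.
+//
+//    input := (&ListObjectsV2Input{}).SetBucket("my-bucket")
+//    for {
+//        page, err := svc.ListObjectsV2(input)
+//        if err != nil {
+//            break // handle the request error
+//        }
+//        // ... process page.Contents ...
+//        if page.NextContinuationToken == nil {
+//            break // no more keys to list
+//        }
+//        input.ContinuationToken = page.NextContinuationToken
+//    }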
+
+// String returns the string representation
+func (s ListObjectsV2Output) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsV2Output) GoString() string {
+ return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetContents sets the Contents field's value.
+func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output {
+ s.Contents = v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKeyCount sets the KeyCount field's value.
+func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output {
+ s.KeyCount = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output {
+ s.Name = &v
+ return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output {
+ s.NextContinuationToken = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output {
+ s.Prefix = &v
+ return s
+}
+
+// SetStartAfter sets the StartAfter field's value.
+func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output {
+ s.StartAfter = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest
+type ListPartsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Sets the maximum number of parts to return.
+ MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"`
+
+ // Specifies the part after which listing should begin. Only parts with higher
+ // part numbers will be listed.
+ PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"`
+
+ // Confirms that the requester knows that they will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be
+ // found at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ //
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
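+
+// Usage sketch (illustrative only): list the parts of an in-progress
+// multipart upload. "svc" is an assumed S3 client; the bucket, key, and
+// uploadID values are hypothetical (uploadID comes from CreateMultipartUpload).
+//
+//    out, err := svc.ListParts((&ListPartsInput{}).
+//        SetBucket("my-bucket").
+//        SetKey("backups/archive.tar").
+//        SetUploadId(uploadID))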
+
+// String returns the string representation
+func (s ListPartsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPartsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListPartsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListPartsInput) SetBucket(v string) *ListPartsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ListPartsInput) SetKey(v string) *ListPartsInput {
+ s.Key = &v
+ return s
+}
+
+// SetMaxParts sets the MaxParts field's value.
+func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput {
+ s.MaxParts = &v
+ return s
+}
+
+// SetPartNumberMarker sets the PartNumberMarker field's value.
+func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput {
+ s.PartNumberMarker = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput {
+ s.UploadId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput
+type ListPartsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Date when the multipart upload will become eligible for an abort operation
+ // by lifecycle.
+ AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
+
+ // ID of the lifecycle rule that makes a multipart upload eligible for the
+ // abort operation.
+ AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `type:"string"`
+
+ // Identifies who initiated the multipart upload.
+ Initiator *Initiator `type:"structure"`
+
+ // Indicates whether the returned list of parts is truncated.
+ IsTruncated *bool `type:"boolean"`
+
+ // Object key for which the multipart upload was initiated.
+ Key *string `min:"1" type:"string"`
+
+ // Maximum number of parts that were allowed in the response.
+ MaxParts *int64 `type:"integer"`
+
+ // When a list is truncated, this element specifies the last part in the list,
+ // as well as the value to use for the part-number-marker request parameter
+ // in a subsequent request.
+ NextPartNumberMarker *int64 `type:"integer"`
+
+ Owner *Owner `type:"structure"`
+
+ // Part number after which listing begins.
+ PartNumberMarker *int64 `type:"integer"`
+
+ Parts []*Part `locationName:"Part" type:"list" flattened:"true"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"StorageClass"`
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ UploadId *string `type:"string"`
+}
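+
+// Pagination sketch (illustrative only): when IsTruncated is true, pass
+// NextPartNumberMarker as the part-number-marker of the next request ("out"
+// is a *ListPartsOutput from a previous call; other names as above).
+//
+//    if out.IsTruncated != nil && *out.IsTruncated {
+//        next := (&ListPartsInput{}).
+//            SetBucket("my-bucket").
+//            SetKey("backups/archive.tar").
+//            SetUploadId(uploadID).
+//            SetPartNumberMarker(*out.NextPartNumberMarker)
+//        _ = next // send with svc.ListParts(next)
+//    }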
+
+// String returns the string representation
+func (s ListPartsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPartsOutput) GoString() string {
+ return s.String()
+}
+
+// SetAbortDate sets the AbortDate field's value.
+func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput {
+ s.AbortDate = &v
+ return s
+}
+
+// SetAbortRuleId sets the AbortRuleId field's value.
+func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput {
+ s.AbortRuleId = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput {
+ s.Bucket = &v
+ return s
+}
+
+// SetInitiator sets the Initiator field's value.
+func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput {
+ s.Initiator = v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput {
+ s.Key = &v
+ return s
+}
+
+// SetMaxParts sets the MaxParts field's value.
+func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput {
+ s.MaxParts = &v
+ return s
+}
+
+// SetNextPartNumberMarker sets the NextPartNumberMarker field's value.
+func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput {
+ s.NextPartNumberMarker = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput {
+ s.Owner = v
+ return s
+}
+
+// SetPartNumberMarker sets the PartNumberMarker field's value.
+func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput {
+ s.PartNumberMarker = &v
+ return s
+}
+
+// SetParts sets the Parts field's value.
+func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput {
+ s.Parts = v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput {
+ s.UploadId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled
+type LoggingEnabled struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the bucket where you want Amazon S3 to store server access logs.
+ // You can have your logs delivered to any bucket that you own, including the
+ // same bucket that is being logged. You can also configure multiple buckets
+ // to deliver their logs to the same target bucket. In this case you should
+ // choose a different TargetPrefix for each source bucket so that the delivered
+ // log files can be distinguished by key.
+ TargetBucket *string `type:"string"`
+
+ TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"`
+
+ // This element lets you specify a prefix for the keys under which the log
+ // files will be stored.
+ TargetPrefix *string `type:"string"`
+}
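+
+// Usage sketch (illustrative only): deliver access logs for a source bucket
+// to a target bucket, using a distinct TargetPrefix per source bucket so the
+// delivered log keys stay distinguishable. "svc" and the bucket names are
+// assumed.
+//
+//    status := &BucketLoggingStatus{
+//        LoggingEnabled: (&LoggingEnabled{}).
+//            SetTargetBucket("my-log-bucket").
+//            SetTargetPrefix("source-bucket/"),
+//    }
+//    _, err := svc.PutBucketLogging((&PutBucketLoggingInput{}).
+//        SetBucket("source-bucket").
+//        SetBucketLoggingStatus(status))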
+
+// String returns the string representation
+func (s LoggingEnabled) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LoggingEnabled) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LoggingEnabled) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"}
+ if s.TargetGrants != nil {
+ for i, v := range s.TargetGrants {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetTargetBucket sets the TargetBucket field's value.
+func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled {
+ s.TargetBucket = &v
+ return s
+}
+
+// SetTargetGrants sets the TargetGrants field's value.
+func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled {
+ s.TargetGrants = v
+ return s
+}
+
+// SetTargetPrefix sets the TargetPrefix field's value.
+func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled {
+ s.TargetPrefix = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator
+type MetricsAndOperator struct {
+ _ struct{} `type:"structure"`
+
+ // The prefix used when evaluating an AND predicate.
+ Prefix *string `type:"string"`
+
+ // The list of tags used when evaluating an AND predicate.
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s MetricsAndOperator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricsAndOperator) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MetricsAndOperator) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"}
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator {
+ s.Prefix = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator {
+ s.Tags = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration
+type MetricsConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies a metrics configuration filter. The metrics configuration will
+ // only include objects that meet the filter's criteria. A filter must be a
+ // prefix, a tag, or a conjunction (MetricsAndOperator).
+ Filter *MetricsFilter `type:"structure"`
+
+ // The ID used to identify the metrics configuration.
+ //
+ // Id is a required field
+ Id *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s MetricsConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricsConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MetricsConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"}
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.Filter != nil {
+ if err := s.Filter.Validate(); err != nil {
+ invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFilter sets the Filter field's value.
+func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration {
+ s.Id = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter
+type MetricsFilter struct {
+ _ struct{} `type:"structure"`
+
+ // A conjunction (logical AND) of predicates, which is used in evaluating a
+ // metrics filter. The operator must have at least two predicates, and an object
+ // must match all of the predicates in order for the filter to apply.
+ And *MetricsAndOperator `type:"structure"`
+
+ // The prefix used when evaluating a metrics filter.
+ Prefix *string `type:"string"`
+
+ // The tag used when evaluating a metrics filter.
+ Tag *Tag `type:"structure"`
+}
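+
+// Construction sketch (illustrative only): a filter that is a conjunction of
+// a prefix predicate and a tag predicate, satisfying the rule that the And
+// operator needs at least two predicates. The prefix and tag values are
+// hypothetical.
+//
+//    filter := (&MetricsFilter{}).SetAnd((&MetricsAndOperator{}).
+//        SetPrefix("photos/").
+//        SetTags([]*Tag{(&Tag{}).SetKey("class").SetValue("hot")}))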
+
+// String returns the string representation
+func (s MetricsFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricsFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MetricsFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"}
+ if s.And != nil {
+ if err := s.And.Validate(); err != nil {
+ invalidParams.AddNested("And", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Tag != nil {
+ if err := s.Tag.Validate(); err != nil {
+ invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAnd sets the And field's value.
+func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter {
+ s.And = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter {
+ s.Prefix = &v
+ return s
+}
+
+// SetTag sets the Tag field's value.
+func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter {
+ s.Tag = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload
+type MultipartUpload struct {
+ _ struct{} `type:"structure"`
+
+ // Date and time at which the multipart upload was initiated.
+ Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Identifies who initiated the multipart upload.
+ Initiator *Initiator `type:"structure"`
+
+ // Key of the object for which the multipart upload was initiated.
+ Key *string `min:"1" type:"string"`
+
+ Owner *Owner `type:"structure"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"StorageClass"`
+
+ // Upload ID that identifies the multipart upload.
+ UploadId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s MultipartUpload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MultipartUpload) GoString() string {
+ return s.String()
+}
+
+// SetInitiated sets the Initiated field's value.
+func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload {
+ s.Initiated = &v
+ return s
+}
+
+// SetInitiator sets the Initiator field's value.
+func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload {
+ s.Initiator = v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *MultipartUpload) SetKey(v string) *MultipartUpload {
+ s.Key = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload {
+ s.Owner = v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload {
+ s.StorageClass = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload {
+ s.UploadId = &v
+ return s
+}
+
+// Specifies when noncurrent object versions expire. Upon expiration, Amazon
+// S3 permanently deletes the noncurrent object versions. You set this lifecycle
+// configuration action on a bucket that has versioning enabled (or suspended)
+// to request that Amazon S3 delete noncurrent object versions at a specific
+// period in the object's lifetime.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration
+type NoncurrentVersionExpiration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of days an object is noncurrent before Amazon S3 can
+ // perform the associated action. For information about the noncurrent days
+ // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
+ // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
+ NoncurrentDays *int64 `type:"integer"`
+}
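+
+// Construction sketch (illustrative only): permanently delete noncurrent
+// versions 30 days after they become noncurrent.
+//
+//    exp := (&NoncurrentVersionExpiration{}).SetNoncurrentDays(30)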
+
+// String returns the string representation
+func (s NoncurrentVersionExpiration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NoncurrentVersionExpiration) GoString() string {
+ return s.String()
+}
+
+// SetNoncurrentDays sets the NoncurrentDays field's value.
+func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration {
+ s.NoncurrentDays = &v
+ return s
+}
+
+// Container for the transition rule that describes when noncurrent objects
+// transition to the STANDARD_IA or GLACIER storage class. If your bucket is
+// versioning-enabled (or versioning is suspended), you can set this action
+// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA
+// or GLACIER storage class at a specific period in the object's lifetime.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition
+type NoncurrentVersionTransition struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of days an object is noncurrent before Amazon S3 can
+ // perform the associated action. For information about the noncurrent days
+ // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
+ // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
+ NoncurrentDays *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"TransitionStorageClass"`
+}
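+
+// Construction sketch (illustrative only): transition noncurrent versions to
+// STANDARD_IA 30 days after they become noncurrent.
+//
+//    tr := (&NoncurrentVersionTransition{}).
+//        SetNoncurrentDays(30).
+//        SetStorageClass("STANDARD_IA") // a TransitionStorageClass enum value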
+
+// String returns the string representation
+func (s NoncurrentVersionTransition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NoncurrentVersionTransition) GoString() string {
+ return s.String()
+}
+
+// SetNoncurrentDays sets the NoncurrentDays field's value.
+func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition {
+ s.NoncurrentDays = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition {
+ s.StorageClass = &v
+ return s
+}
+
+// Container for specifying the notification configuration of the bucket. If
+// this element is empty, notifications are turned off on the bucket.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration
+type NotificationConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"`
+
+ QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"`
+
+ TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"`
+}
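+
+// Usage sketch (illustrative only): an empty NotificationConfiguration turns
+// bucket notifications off. "svc" and the bucket name are assumed.
+//
+//    _, err := svc.PutBucketNotificationConfiguration(
+//        (&PutBucketNotificationConfigurationInput{}).
+//            SetBucket("my-bucket").
+//            SetNotificationConfiguration(&NotificationConfiguration{}))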
+
+// String returns the string representation
+func (s NotificationConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NotificationConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *NotificationConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"}
+ if s.LambdaFunctionConfigurations != nil {
+ for i, v := range s.LambdaFunctionConfigurations {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.QueueConfigurations != nil {
+ for i, v := range s.QueueConfigurations {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.TopicConfigurations != nil {
+ for i, v := range s.TopicConfigurations {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value.
+func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration {
+ s.LambdaFunctionConfigurations = v
+ return s
+}
+
+// SetQueueConfigurations sets the QueueConfigurations field's value.
+func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration {
+ s.QueueConfigurations = v
+ return s
+}
+
+// SetTopicConfigurations sets the TopicConfigurations field's value.
+func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration {
+ s.TopicConfigurations = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated
+type NotificationConfigurationDeprecated struct {
+ _ struct{} `type:"structure"`
+
+ CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"`
+
+ QueueConfiguration *QueueConfigurationDeprecated `type:"structure"`
+
+ TopicConfiguration *TopicConfigurationDeprecated `type:"structure"`
+}
+
+// String returns the string representation
+func (s NotificationConfigurationDeprecated) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NotificationConfigurationDeprecated) GoString() string {
+ return s.String()
+}
+
+// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value.
+func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated {
+ s.CloudFunctionConfiguration = v
+ return s
+}
+
+// SetQueueConfiguration sets the QueueConfiguration field's value.
+func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated {
+ s.QueueConfiguration = v
+ return s
+}
+
+// SetTopicConfiguration sets the TopicConfiguration field's value.
+func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated {
+ s.TopicConfiguration = v
+ return s
+}
+
+// Container for object key name filtering rules. For information about key
+// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter
+type NotificationConfigurationFilter struct {
+ _ struct{} `type:"structure"`
+
+ // Container for object key name prefix and suffix filtering rules.
+ Key *KeyFilter `locationName:"S3Key" type:"structure"`
+}
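+
+// Construction sketch (illustrative only): restrict notifications to keys
+// with a given prefix and suffix. The rule values are hypothetical.
+//
+//    filter := (&NotificationConfigurationFilter{}).SetKey(
+//        (&KeyFilter{}).SetFilterRules([]*FilterRule{
+//            (&FilterRule{}).SetName("prefix").SetValue("images/"),
+//            (&FilterRule{}).SetName("suffix").SetValue(".jpg"),
+//        }))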
+
+// String returns the string representation
+func (s NotificationConfigurationFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NotificationConfigurationFilter) GoString() string {
+ return s.String()
+}
+
+// SetKey sets the Key field's value.
+func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter {
+ s.Key = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object
+type Object struct {
+ _ struct{} `type:"structure"`
+
+ ETag *string `type:"string"`
+
+ Key *string `min:"1" type:"string"`
+
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ Owner *Owner `type:"structure"`
+
+ Size *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"ObjectStorageClass"`
+}
+
+// String returns the string representation
+func (s Object) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Object) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *Object) SetETag(v string) *Object {
+ s.ETag = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Object) SetKey(v string) *Object {
+ s.Key = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *Object) SetLastModified(v time.Time) *Object {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *Object) SetOwner(v *Owner) *Object {
+ s.Owner = v
+ return s
+}
+
+// SetSize sets the Size field's value.
+func (s *Object) SetSize(v int64) *Object {
+ s.Size = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Object) SetStorageClass(v string) *Object {
+ s.StorageClass = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier
+type ObjectIdentifier struct {
+ _ struct{} `type:"structure"`
+
+ // Key name of the object to delete.
+ //
+ // Key is a required field
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // VersionId for the specific version of the object to delete.
+ VersionId *string `type:"string"`
+}
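+
+// Usage sketch (illustrative only): delete a specific object version in a
+// batch request. "svc", the bucket, the key, and versionID are assumed names.
+//
+//    _, err := svc.DeleteObjects((&DeleteObjectsInput{}).
+//        SetBucket("my-bucket").
+//        SetDelete((&Delete{}).SetObjects([]*ObjectIdentifier{
+//            (&ObjectIdentifier{}).SetKey("old/report.csv").SetVersionId(versionID),
+//        })))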
+
+// String returns the string representation
+func (s ObjectIdentifier) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ObjectIdentifier) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ObjectIdentifier) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion
+type ObjectVersion struct {
+ _ struct{} `type:"structure"`
+
+ ETag *string `type:"string"`
+
+ // Specifies whether this version is (true) or is not (false) the latest
+ // version of the object.
+ IsLatest *bool `type:"boolean"`
+
+ // The object key.
+ Key *string `min:"1" type:"string"`
+
+ // Date and time the object was last modified.
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ Owner *Owner `type:"structure"`
+
+ // Size in bytes of the object.
+ Size *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"`
+
+ // Version ID of an object.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ObjectVersion) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ObjectVersion) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *ObjectVersion) SetETag(v string) *ObjectVersion {
+ s.ETag = &v
+ return s
+}
+
+// SetIsLatest sets the IsLatest field's value.
+func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion {
+ s.IsLatest = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ObjectVersion) SetKey(v string) *ObjectVersion {
+ s.Key = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion {
+ s.Owner = v
+ return s
+}
+
+// SetSize sets the Size field's value.
+func (s *ObjectVersion) SetSize(v int64) *ObjectVersion {
+ s.Size = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion {
+ s.StorageClass = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner
+type Owner struct {
+ _ struct{} `type:"structure"`
+
+ DisplayName *string `type:"string"`
+
+ ID *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Owner) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Owner) GoString() string {
+ return s.String()
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *Owner) SetDisplayName(v string) *Owner {
+ s.DisplayName = &v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Owner) SetID(v string) *Owner {
+ s.ID = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part
+type Part struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag returned when the part was uploaded.
+ ETag *string `type:"string"`
+
+ // Date and time at which the part was uploaded.
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Part number identifying the part. This is a positive integer between 1 and
+ // 10,000.
+ PartNumber *int64 `type:"integer"`
+
+ // Size of the uploaded part data.
+ Size *int64 `type:"integer"`
+}
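+
+// Usage sketch (illustrative only): feed listed parts back into
+// CompleteMultipartUpload as CompletedPart values (ETag plus PartNumber).
+// "parts" is assumed to hold []*Part from a ListParts response.
+//
+//    var done []*CompletedPart
+//    for _, p := range parts {
+//        done = append(done, (&CompletedPart{}).
+//            SetETag(*p.ETag).
+//            SetPartNumber(*p.PartNumber))
+//    }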
+
+// String returns the string representation
+func (s Part) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Part) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *Part) SetETag(v string) *Part {
+ s.ETag = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *Part) SetLastModified(v time.Time) *Part {
+ s.LastModified = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *Part) SetPartNumber(v int64) *Part {
+ s.PartNumber = &v
+ return s
+}
+
+// SetSize sets the Size field's value.
+func (s *Part) SetSize(v int64) *Part {
+ s.Size = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest
+type PutBucketAccelerateConfigurationInput struct {
+ _ struct{} `type:"structure" payload:"AccelerateConfiguration"`
+
+ // Specifies the Accelerate Configuration you want to set for the bucket.
+ //
+ // AccelerateConfiguration is a required field
+ AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"`
+
+ // Name of the bucket for which the accelerate configuration is set.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
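+
+// Usage sketch (illustrative only): enable Transfer Acceleration on a bucket.
+// "svc" and the bucket name are assumed.
+//
+//    _, err := svc.PutBucketAccelerateConfiguration(
+//        (&PutBucketAccelerateConfigurationInput{}).
+//            SetBucket("my-bucket").
+//            SetAccelerateConfiguration(
+//                (&AccelerateConfiguration{}).SetStatus("Enabled")))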
+
+// String returns the string representation
+func (s PutBucketAccelerateConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAccelerateConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketAccelerateConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"}
+ if s.AccelerateConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration"))
+ }
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccelerateConfiguration sets the AccelerateConfiguration field's value.
+func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput {
+ s.AccelerateConfiguration = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput
+type PutBucketAccelerateConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketAccelerateConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAccelerateConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest
+type PutBucketAclInput struct {
+ _ struct{} `type:"structure" payload:"AccessControlPolicy"`
+
+ // The canned ACL to apply to the bucket.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
+
+ AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+}
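+
+// Usage sketch (illustrative only): apply a canned ACL instead of spelling
+// out individual grant headers. "svc" and the bucket name are assumed.
+//
+//    _, err := svc.PutBucketAcl((&PutBucketAclInput{}).
+//        SetBucket("my-bucket").
+//        SetACL("private")) // a BucketCannedACL enum value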
+
+// String returns the string representation
+func (s PutBucketAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketAclInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.AccessControlPolicy != nil {
+ if err := s.AccessControlPolicy.Validate(); err != nil {
+ invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput {
+ s.ACL = &v
+ return s
+}
+
+// SetAccessControlPolicy sets the AccessControlPolicy field's value.
+func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput {
+ s.AccessControlPolicy = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput {
+ s.GrantWrite = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclOutput
+type PutBucketAclOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAclOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest
+type PutBucketAnalyticsConfigurationInput struct {
+ _ struct{} `type:"structure" payload:"AnalyticsConfiguration"`
+
+ // The configuration and any analyses for the analytics filter.
+ //
+ // AnalyticsConfiguration is a required field
+ AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true"`
+
+ // The name of the bucket in which the analytics configuration is stored.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The identifier used to represent an analytics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketAnalyticsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAnalyticsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketAnalyticsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"}
+ if s.AnalyticsConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration"))
+ }
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.AnalyticsConfiguration != nil {
+ if err := s.AnalyticsConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value.
+func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput {
+ s.AnalyticsConfiguration = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationOutput
+type PutBucketAnalyticsConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketAnalyticsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAnalyticsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest
+type PutBucketCorsInput struct {
+ _ struct{} `type:"structure" payload:"CORSConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // CORSConfiguration is a required field
+ CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketCorsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketCorsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketCorsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.CORSConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("CORSConfiguration"))
+ }
+ if s.CORSConfiguration != nil {
+ if err := s.CORSConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCORSConfiguration sets the CORSConfiguration field's value.
+func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput {
+ s.CORSConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsOutput
+type PutBucketCorsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketCorsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketCorsOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest
+type PutBucketInventoryConfigurationInput struct {
+ _ struct{} `type:"structure" payload:"InventoryConfiguration"`
+
+ // The name of the bucket where the inventory configuration will be stored.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the inventory configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+
+ // Specifies the inventory configuration.
+ //
+ // InventoryConfiguration is a required field
+ InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketInventoryConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketInventoryConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketInventoryConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.InventoryConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration"))
+ }
+ if s.InventoryConfiguration != nil {
+ if err := s.InventoryConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// SetInventoryConfiguration sets the InventoryConfiguration field's value.
+func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput {
+ s.InventoryConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationOutput
+type PutBucketInventoryConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketInventoryConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketInventoryConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest
+type PutBucketLifecycleConfigurationInput struct {
+ _ struct{} `type:"structure" payload:"LifecycleConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLifecycleConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLifecycleConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLifecycleConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.LifecycleConfiguration != nil {
+ if err := s.LifecycleConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
+func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput {
+ s.LifecycleConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationOutput
+type PutBucketLifecycleConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLifecycleConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLifecycleConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest
+type PutBucketLifecycleInput struct {
+ _ struct{} `type:"structure" payload:"LifecycleConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLifecycleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLifecycleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLifecycleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.LifecycleConfiguration != nil {
+ if err := s.LifecycleConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
+func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput {
+ s.LifecycleConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleOutput
+type PutBucketLifecycleOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLifecycleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLifecycleOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest
+type PutBucketLoggingInput struct {
+ _ struct{} `type:"structure" payload:"BucketLoggingStatus"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // BucketLoggingStatus is a required field
+ BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketLoggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLoggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLoggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.BucketLoggingStatus == nil {
+ invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus"))
+ }
+ if s.BucketLoggingStatus != nil {
+ if err := s.BucketLoggingStatus.Validate(); err != nil {
+ invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetBucketLoggingStatus sets the BucketLoggingStatus field's value.
+func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput {
+ s.BucketLoggingStatus = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingOutput
+type PutBucketLoggingOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLoggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLoggingOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest
+type PutBucketMetricsConfigurationInput struct {
+ _ struct{} `type:"structure" payload:"MetricsConfiguration"`
+
+ // The name of the bucket for which the metrics configuration is set.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the metrics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+
+ // Specifies the metrics configuration.
+ //
+ // MetricsConfiguration is a required field
+ MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketMetricsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketMetricsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketMetricsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.MetricsConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration"))
+ }
+ if s.MetricsConfiguration != nil {
+ if err := s.MetricsConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// SetMetricsConfiguration sets the MetricsConfiguration field's value.
+func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput {
+ s.MetricsConfiguration = v
+ return s
+}
+
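+// examplePutBucketMetricsConfiguration is an illustrative sketch, not part of
+// the generated API. Note that Id appears twice: once as the querystring
+// parameter (location:"querystring" above) and once inside the payload
+// structure; S3 expects the two to match. All names are placeholders.
+func examplePutBucketMetricsConfiguration(svc *S3) error {
+ input := new(PutBucketMetricsConfigurationInput).
+  SetBucket("example-bucket").
+  SetId("EntireBucket").
+  SetMetricsConfiguration(new(MetricsConfiguration).SetId("EntireBucket"))
+ if err := input.Validate(); err != nil {
+  return err
+ }
+ _, err := svc.PutBucketMetricsConfiguration(input)
+ return err
+}
+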
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationOutput
+type PutBucketMetricsConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketMetricsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketMetricsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest
+type PutBucketNotificationConfigurationInput struct {
+ _ struct{} `type:"structure" payload:"NotificationConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Container for specifying the notification configuration of the bucket. If
+ // this element is empty, notifications are turned off on the bucket.
+ //
+ // NotificationConfiguration is a required field
+ NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketNotificationConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketNotificationConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketNotificationConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.NotificationConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration"))
+ }
+ if s.NotificationConfiguration != nil {
+ if err := s.NotificationConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetNotificationConfiguration sets the NotificationConfiguration field's value.
+func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput {
+ s.NotificationConfiguration = v
+ return s
+}
+
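+// examplePutBucketNotification is an illustrative sketch, not part of the
+// generated API: it wires a single SQS QueueConfiguration (defined later in
+// this file) into the bucket's notification configuration. The queue ARN and
+// account ID are placeholders.
+func examplePutBucketNotification(svc *S3) error {
+ event := "s3:ObjectCreated:*"
+ queue := new(QueueConfiguration).
+  SetQueueArn("arn:aws:sqs:us-east-1:123456789012:example-queue").
+  SetEvents([]*string{&event})
+ input := new(PutBucketNotificationConfigurationInput).
+  SetBucket("example-bucket").
+  SetNotificationConfiguration(
+   new(NotificationConfiguration).SetQueueConfigurations([]*QueueConfiguration{queue}))
+ if err := input.Validate(); err != nil {
+  return err
+ }
+ _, err := svc.PutBucketNotificationConfiguration(input)
+ return err
+}
+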
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationOutput
+type PutBucketNotificationConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketNotificationConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketNotificationConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest
+type PutBucketNotificationInput struct {
+ _ struct{} `type:"structure" payload:"NotificationConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // NotificationConfiguration is a required field
+ NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketNotificationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketNotificationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketNotificationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.NotificationConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetNotificationConfiguration sets the NotificationConfiguration field's value.
+func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput {
+ s.NotificationConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationOutput
+type PutBucketNotificationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketNotificationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketNotificationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest
+type PutBucketPolicyInput struct {
+ _ struct{} `type:"structure" payload:"Policy"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The bucket policy as a JSON document.
+ //
+ // Policy is a required field
+ Policy *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Policy == nil {
+ invalidParams.Add(request.NewErrParamRequired("Policy"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput {
+ s.Policy = &v
+ return s
+}
+
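+// examplePutBucketPolicy is an illustrative sketch, not part of the generated
+// API: the policy is sent as the raw request payload (payload:"Policy"), so
+// it is passed as a JSON string rather than a structure. The bucket name and
+// policy document are placeholders.
+func examplePutBucketPolicy(svc *S3) error {
+ input := new(PutBucketPolicyInput).
+  SetBucket("example-bucket").
+  SetPolicy(`{"Version":"2012-10-17","Statement":[]}`)
+ if err := input.Validate(); err != nil {
+  return err
+ }
+ _, err := svc.PutBucketPolicy(input)
+ return err
+}
+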
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyOutput
+type PutBucketPolicyOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketPolicyOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest
+type PutBucketReplicationInput struct {
+ _ struct{} `type:"structure" payload:"ReplicationConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Container for replication rules. You can add as many as 1,000 rules. Total
+ // replication configuration size can be up to 2 MB.
+ //
+ // ReplicationConfiguration is a required field
+ ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketReplicationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketReplicationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketReplicationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.ReplicationConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration"))
+ }
+ if s.ReplicationConfiguration != nil {
+ if err := s.ReplicationConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
+func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput {
+ s.ReplicationConfiguration = v
+ return s
+}
+
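+// examplePutBucketReplication is an illustrative sketch, not part of the
+// generated API. Note that Validate only checks that Prefix is non-nil, so an
+// empty prefix (replicate every object) passes; the role and bucket ARNs are
+// placeholders.
+func examplePutBucketReplication(svc *S3) error {
+ rule := new(ReplicationRule).
+  SetDestination(new(Destination).SetBucket("arn:aws:s3:::example-replica-bucket")).
+  SetPrefix("").
+  SetStatus("Enabled")
+ cfg := new(ReplicationConfiguration).
+  SetRole("arn:aws:iam::123456789012:role/example-replication").
+  SetRules([]*ReplicationRule{rule})
+ input := new(PutBucketReplicationInput).
+  SetBucket("example-bucket").
+  SetReplicationConfiguration(cfg)
+ if err := input.Validate(); err != nil {
+  return err
+ }
+ _, err := svc.PutBucketReplication(input)
+ return err
+}
+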
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationOutput
+type PutBucketReplicationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketReplicationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketReplicationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest
+type PutBucketRequestPaymentInput struct {
+ _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // RequestPaymentConfiguration is a required field
+ RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketRequestPaymentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketRequestPaymentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketRequestPaymentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.RequestPaymentConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration"))
+ }
+ if s.RequestPaymentConfiguration != nil {
+ if err := s.RequestPaymentConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value.
+func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput {
+ s.RequestPaymentConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentOutput
+type PutBucketRequestPaymentOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketRequestPaymentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketRequestPaymentOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest
+type PutBucketTaggingInput struct {
+ _ struct{} `type:"structure" payload:"Tagging"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Tagging is a required field
+ Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Tagging == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tagging"))
+ }
+ if s.Tagging != nil {
+ if err := s.Tagging.Validate(); err != nil {
+ invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput {
+ s.Tagging = v
+ return s
+}
+
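+// examplePutBucketTagging is an illustrative sketch, not part of the generated
+// API: Tagging wraps a TagSet of Tag values, each with a required Key and
+// Value. The names used here are placeholders.
+func examplePutBucketTagging(svc *S3) error {
+ input := new(PutBucketTaggingInput).
+  SetBucket("example-bucket").
+  SetTagging(new(Tagging).SetTagSet([]*Tag{
+   new(Tag).SetKey("environment").SetValue("dev"),
+  }))
+ if err := input.Validate(); err != nil {
+  return err
+ }
+ _, err := svc.PutBucketTagging(input)
+ return err
+}
+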
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingOutput
+type PutBucketTaggingOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest
+type PutBucketVersioningInput struct {
+ _ struct{} `type:"structure" payload:"VersioningConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+ // VersioningConfiguration is a required field
+ VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketVersioningInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketVersioningInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketVersioningInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.VersioningConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput {
+ s.MFA = &v
+ return s
+}
+
+// SetVersioningConfiguration sets the VersioningConfiguration field's value.
+func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput {
+ s.VersioningConfiguration = v
+ return s
+}
+
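+// examplePutBucketVersioning is an illustrative sketch, not part of the
+// generated API. The MFA header is only needed when changing the MFA Delete
+// setting, so it is omitted here; the bucket name is a placeholder.
+func examplePutBucketVersioning(svc *S3) error {
+ input := new(PutBucketVersioningInput).
+  SetBucket("example-bucket").
+  SetVersioningConfiguration(new(VersioningConfiguration).SetStatus("Enabled"))
+ if err := input.Validate(); err != nil {
+  return err
+ }
+ _, err := svc.PutBucketVersioning(input)
+ return err
+}
+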
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningOutput
+type PutBucketVersioningOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketVersioningOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketVersioningOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest
+type PutBucketWebsiteInput struct {
+ _ struct{} `type:"structure" payload:"WebsiteConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // WebsiteConfiguration is a required field
+ WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketWebsiteInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketWebsiteInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketWebsiteInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.WebsiteConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration"))
+ }
+ if s.WebsiteConfiguration != nil {
+ if err := s.WebsiteConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetWebsiteConfiguration sets the WebsiteConfiguration field's value.
+func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput {
+ s.WebsiteConfiguration = v
+ return s
+}
+
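+// examplePutBucketWebsite is an illustrative sketch, not part of the generated
+// API: a minimal static-website configuration with an index and an error
+// document. Object key names are placeholders.
+func examplePutBucketWebsite(svc *S3) error {
+ cfg := new(WebsiteConfiguration).
+  SetIndexDocument(new(IndexDocument).SetSuffix("index.html")).
+  SetErrorDocument(new(ErrorDocument).SetKey("error.html"))
+ input := new(PutBucketWebsiteInput).
+  SetBucket("example-bucket").
+  SetWebsiteConfiguration(cfg)
+ if err := input.Validate(); err != nil {
+  return err
+ }
+ _, err := svc.PutBucketWebsite(input)
+ return err
+}
+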
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteOutput
+type PutBucketWebsiteOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketWebsiteOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketWebsiteOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest
+type PutObjectAclInput struct {
+ _ struct{} `type:"structure" payload:"AccessControlPolicy"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Allows grantee the read, read ACP, and write ACP permissions on the
+ // object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectAclInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.AccessControlPolicy != nil {
+ if err := s.AccessControlPolicy.Validate(); err != nil {
+ invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput {
+ s.ACL = &v
+ return s
+}
+
+// SetAccessControlPolicy sets the AccessControlPolicy field's value.
+func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput {
+ s.AccessControlPolicy = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput {
+ s.GrantWrite = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput {
+ s.VersionId = &v
+ return s
+}
+
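+// examplePutObjectAcl is an illustrative sketch, not part of the generated
+// API: a canned ACL set via the x-amz-acl header is the simplest form; the
+// AccessControlPolicy payload is the explicit alternative and is left unset
+// here. Bucket and key are placeholders.
+func examplePutObjectAcl(svc *S3) error {
+ input := new(PutObjectAclInput).
+  SetBucket("example-bucket").
+  SetKey("photos/photo.jpg").
+  SetACL("public-read")
+ if err := input.Validate(); err != nil {
+  return err
+ }
+ _, err := svc.PutObjectAcl(input)
+ return err
+}
+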
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput
+type PutObjectAclOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s PutObjectAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectAclOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest
+type PutObjectInput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // Object data.
+ Body io.ReadSeeker `type:"blob"`
+
+ // Name of the bucket to which the PUT operation was initiated.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes. This parameter is useful when the size of the
+ // body cannot be determined automatically.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Object key for which the PUT operation was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the object. The tag-set must be encoded as URL query parameters.
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *PutObjectInput) SetACL(v string) *PutObjectInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBody sets the Body field's value.
+func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput {
+ s.Body = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectInput) SetBucket(v string) *PutObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *PutObjectInput) SetContentType(v string) *PutObjectInput {
+ s.ContentType = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput {
+ s.Expires = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectInput) SetKey(v string) *PutObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput {
+ s.Metadata = v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutObjectInput) SetTagging(v string) *PutObjectInput {
+ s.Tagging = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
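+// examplePutObject is an illustrative sketch, not part of the generated API:
+// the body can be any io.ReadSeeker, and headers such as Content-Type,
+// server-side encryption, and user metadata are set through the same fluent
+// methods. Bucket, key, and metadata values are placeholders.
+func examplePutObject(svc *S3, body io.ReadSeeker) (*PutObjectOutput, error) {
+ origin := "inventory"
+ input := new(PutObjectInput).
+  SetBucket("example-bucket").
+  SetKey("data/report.csv").
+  SetBody(body).
+  SetContentType("text/csv").
+  SetServerSideEncryption("AES256").
+  SetMetadata(map[string]*string{"origin": &origin})
+ if err := input.Validate(); err != nil {
+  return nil, err
+ }
+ return svc.PutObject(input)
+}
+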
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput
+type PutObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag for the uploaded object.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured, this will contain the expiration
+ // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // Version of the object.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest
+type PutObjectTaggingInput struct {
+ _ struct{} `type:"structure" payload:"Tagging"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Tagging is a required field
+ Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"`
+
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Tagging == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tagging"))
+ }
+ if s.Tagging != nil {
+ if err := s.Tagging.Validate(); err != nil {
+ invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput {
+ s.Key = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput {
+ s.Tagging = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput {
+ s.VersionId = &v
+ return s
+}
+
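+// examplePutObjectTagging is an illustrative sketch, not part of the generated
+// API: object tagging reuses the same Tagging/Tag structures as bucket
+// tagging, plus a key and an optional versionId. All names are placeholders.
+func examplePutObjectTagging(svc *S3) error {
+ input := new(PutObjectTaggingInput).
+  SetBucket("example-bucket").
+  SetKey("data/report.csv").
+  SetTagging(new(Tagging).SetTagSet([]*Tag{
+   new(Tag).SetKey("status").SetValue("archived"),
+  }))
+ if err := input.Validate(); err != nil {
+  return err
+ }
+ _, err := svc.PutObjectTagging(input)
+ return err
+}
+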
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput
+type PutObjectTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput {
+ s.VersionId = &v
+ return s
+}
+
+// Container for specifying a configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Queue Service (Amazon SQS) queue.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration
+type QueueConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Events is a required field
+ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+ // Container for object key name filtering rules. For information about key
+ // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ Filter *NotificationConfigurationFilter `type:"structure"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects
+ // events of the specified type.
+ //
+ // QueueArn is a required field
+ QueueArn *string `locationName:"Queue" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s QueueConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *QueueConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"}
+ if s.Events == nil {
+ invalidParams.Add(request.NewErrParamRequired("Events"))
+ }
+ if s.QueueArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEvents sets the Events field's value.
+func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration {
+ s.Events = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *QueueConfiguration) SetId(v string) *QueueConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetQueueArn sets the QueueArn field's value.
+func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration {
+ s.QueueArn = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated
+type QueueConfigurationDeprecated struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket event for which to send notifications.
+ Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ Queue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s QueueConfigurationDeprecated) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfigurationDeprecated) GoString() string {
+ return s.String()
+}
+
+// SetEvent sets the Event field's value.
+func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated {
+ s.Event = &v
+ return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated {
+ s.Events = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated {
+ s.Id = &v
+ return s
+}
+
+// SetQueue sets the Queue field's value.
+func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated {
+ s.Queue = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect
+type Redirect struct {
+ _ struct{} `type:"structure"`
+
+ // The host name to use in the redirect request.
+ HostName *string `type:"string"`
+
+ // The HTTP redirect code to use on the response. Not required if one of the
+ // siblings is present.
+ HttpRedirectCode *string `type:"string"`
+
+ // Protocol to use (http, https) when redirecting requests. The default is the
+ // protocol that is used in the original request.
+ Protocol *string `type:"string" enum:"Protocol"`
+
+ // The object key prefix to use in the redirect request. For example, to redirect
+ // requests for all pages with prefix docs/ (objects in the docs/ folder) to
+ // documents/, you can set a condition block with KeyPrefixEquals set to docs/
+ // and, in the Redirect, set ReplaceKeyPrefixWith to documents/. Not required
+ // if one of the siblings is present. Can be present only if ReplaceKeyWith
+ // is not provided.
+ ReplaceKeyPrefixWith *string `type:"string"`
+
+ // The specific object key to use in the redirect request. For example, redirect
+ // requests to error.html. Not required if one of the siblings is present. Can
+ // be present only if ReplaceKeyPrefixWith is not provided.
+ ReplaceKeyWith *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Redirect) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Redirect) GoString() string {
+ return s.String()
+}
+
+// SetHostName sets the HostName field's value.
+func (s *Redirect) SetHostName(v string) *Redirect {
+ s.HostName = &v
+ return s
+}
+
+// SetHttpRedirectCode sets the HttpRedirectCode field's value.
+func (s *Redirect) SetHttpRedirectCode(v string) *Redirect {
+ s.HttpRedirectCode = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *Redirect) SetProtocol(v string) *Redirect {
+ s.Protocol = &v
+ return s
+}
+
+// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value.
+func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect {
+ s.ReplaceKeyPrefixWith = &v
+ return s
+}
+
+// SetReplaceKeyWith sets the ReplaceKeyWith field's value.
+func (s *Redirect) SetReplaceKeyWith(v string) *Redirect {
+ s.ReplaceKeyWith = &v
+ return s
+}
+
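+// exampleRedirectRule is an illustrative sketch, not part of the generated
+// API: it expresses the docs/ -> documents/ rewrite described in the
+// ReplaceKeyPrefixWith comment above as a website RoutingRule (Condition and
+// RoutingRule are defined elsewhere in this file).
+func exampleRedirectRule() *WebsiteConfiguration {
+ rule := new(RoutingRule).
+  SetCondition(new(Condition).SetKeyPrefixEquals("docs/")).
+  SetRedirect(new(Redirect).SetReplaceKeyPrefixWith("documents/"))
+ return new(WebsiteConfiguration).
+  SetIndexDocument(new(IndexDocument).SetSuffix("index.html")).
+  SetRoutingRules([]*RoutingRule{rule})
+}
+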
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo
+type RedirectAllRequestsTo struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the host where requests will be redirected.
+ //
+ // HostName is a required field
+ HostName *string `type:"string" required:"true"`
+
+ // Protocol to use (http, https) when redirecting requests. The default is the
+ // protocol that is used in the original request.
+ Protocol *string `type:"string" enum:"Protocol"`
+}
+
+// String returns the string representation
+func (s RedirectAllRequestsTo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RedirectAllRequestsTo) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RedirectAllRequestsTo) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"}
+ if s.HostName == nil {
+ invalidParams.Add(request.NewErrParamRequired("HostName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetHostName sets the HostName field's value.
+func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo {
+ s.HostName = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo {
+ s.Protocol = &v
+ return s
+}
+
+// Container for replication rules. You can add as many as 1,000 rules. Total
+// replication configuration size can be up to 2 MB.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration
+type ReplicationConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating
+ // the objects.
+ //
+ // Role is a required field
+ Role *string `type:"string" required:"true"`
+
+ // Container for information about a particular replication rule. Replication
+ // configuration must have at least one rule and can contain up to 1,000 rules.
+ //
+ // Rules is a required field
+ Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s ReplicationConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"}
+ if s.Role == nil {
+ invalidParams.Add(request.NewErrParamRequired("Role"))
+ }
+ if s.Rules == nil {
+ invalidParams.Add(request.NewErrParamRequired("Rules"))
+ }
+ if s.Rules != nil {
+ for i, v := range s.Rules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRole sets the Role field's value.
+func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration {
+ s.Role = &v
+ return s
+}
+
+// SetRules sets the Rules field's value.
+func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration {
+ s.Rules = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule
+type ReplicationRule struct {
+ _ struct{} `type:"structure"`
+
+ // Destination is a required field
+ Destination *Destination `type:"structure" required:"true"`
+
+ // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string `type:"string"`
+
+ // Object key name prefix identifying one or more objects to which the rule
+ // applies. The maximum prefix length is 1,024 characters. Overlapping prefixes
+ // are not supported.
+ //
+ // Prefix is a required field
+ Prefix *string `type:"string" required:"true"`
+
+ // The rule is ignored if status is not Enabled.
+ //
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"`
+}
+
+// String returns the string representation
+func (s ReplicationRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"}
+ if s.Destination == nil {
+ invalidParams.Add(request.NewErrParamRequired("Destination"))
+ }
+ if s.Prefix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Prefix"))
+ }
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+ if s.Destination != nil {
+ if err := s.Destination.Validate(); err != nil {
+ invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDestination sets the Destination field's value.
+func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule {
+ s.Destination = v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *ReplicationRule) SetID(v string) *ReplicationRule {
+ s.ID = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule {
+ s.Prefix = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *ReplicationRule) SetStatus(v string) *ReplicationRule {
+ s.Status = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration
+type RequestPaymentConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies who pays for the download and request fees.
+ //
+ // Payer is a required field
+ Payer *string `type:"string" required:"true" enum:"Payer"`
+}
+
+// String returns the string representation
+func (s RequestPaymentConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RequestPaymentConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RequestPaymentConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"}
+ if s.Payer == nil {
+ invalidParams.Add(request.NewErrParamRequired("Payer"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPayer sets the Payer field's value.
+func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration {
+ s.Payer = &v
+ return s
+}
+
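+// Illustrative sketch (not generated code): a minimal requester-pays
+// configuration using the Payer enum values defined at the end of this file.
+//
+//	rpc := (&RequestPaymentConfiguration{}).SetPayer(PayerRequester)
+//	_ = rpc.Validate() // nil, since the required Payer field is set
+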
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest
+type RestoreObjectInput struct {
+ _ struct{} `type:"structure" payload:"RestoreRequest"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"`
+
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s RestoreObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.RestoreRequest != nil {
+ if err := s.RestoreRequest.Validate(); err != nil {
+ invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetRestoreRequest sets the RestoreRequest field's value.
+func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput {
+ s.RestoreRequest = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput
+type RestoreObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s RestoreObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest
+type RestoreRequest struct {
+ _ struct{} `type:"structure"`
+
+ // Lifetime of the active copy in days.
+ //
+ // Days is a required field
+ Days *int64 `type:"integer" required:"true"`
+
+ // Glacier-related parameters pertaining to this job.
+ GlacierJobParameters *GlacierJobParameters `type:"structure"`
+}
+
+// String returns the string representation
+func (s RestoreRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreRequest) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreRequest) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"}
+ if s.Days == nil {
+ invalidParams.Add(request.NewErrParamRequired("Days"))
+ }
+ if s.GlacierJobParameters != nil {
+ if err := s.GlacierJobParameters.Validate(); err != nil {
+ invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDays sets the Days field's value.
+func (s *RestoreRequest) SetDays(v int64) *RestoreRequest {
+ s.Days = &v
+ return s
+}
+
+// SetGlacierJobParameters sets the GlacierJobParameters field's value.
+func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest {
+ s.GlacierJobParameters = v
+ return s
+}
+
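+// Illustrative sketch (not generated code): restoring an archived object for
+// seven days. The bucket and key names are hypothetical placeholders.
+//
+//	in := (&RestoreObjectInput{}).
+//		SetBucket("my-bucket").
+//		SetKey("archive/report.csv").
+//		SetRestoreRequest((&RestoreRequest{}).SetDays(7))
+//	if err := in.Validate(); err != nil {
+//		// handle invalid parameters before calling RestoreObject
+//	}
+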
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule
+type RoutingRule struct {
+ _ struct{} `type:"structure"`
+
+ // A container for describing a condition that must be met for the specified
+ // redirect to apply. For example: 1. If a request is for pages in the /docs
+ // folder, redirect to the /documents folder. 2. If a request results in an HTTP
+ // 4xx error, redirect the request to another host where you might process the error.
+ Condition *Condition `type:"structure"`
+
+ // Container for redirect information. You can redirect requests to another
+ // host, to another page, or with another protocol. In the event of an error,
+ // you can specify a different error code to return.
+ //
+ // Redirect is a required field
+ Redirect *Redirect `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s RoutingRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RoutingRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RoutingRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RoutingRule"}
+ if s.Redirect == nil {
+ invalidParams.Add(request.NewErrParamRequired("Redirect"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCondition sets the Condition field's value.
+func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule {
+ s.Condition = v
+ return s
+}
+
+// SetRedirect sets the Redirect field's value.
+func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule {
+ s.Redirect = v
+ return s
+}
+
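+// Illustrative sketch (not generated code): a routing rule that redirects
+// requests under docs/ to documents/. The Condition and Redirect field names
+// are assumed from the corresponding types elsewhere in this file.
+//
+//	rr := (&RoutingRule{}).
+//		SetCondition((&Condition{}).SetKeyPrefixEquals("docs/")).
+//		SetRedirect((&Redirect{}).SetReplaceKeyPrefixWith("documents/"))
+//	_ = rr.Validate()
+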
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule
+type Rule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the days since the initiation of an Incomplete Multipart Upload
+ // that Lifecycle will wait before permanently removing all parts of the upload.
+ AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+
+ Expiration *LifecycleExpiration `type:"structure"`
+
+ // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string `type:"string"`
+
+ // Specifies when noncurrent object versions expire. Upon expiration, Amazon
+ // S3 permanently deletes the noncurrent object versions. You set this lifecycle
+ // configuration action on a bucket that has versioning enabled (or suspended)
+ // to request that Amazon S3 delete noncurrent object versions at a specific
+ // period in the object's lifetime.
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+
+ // Container for the transition rule that describes when noncurrent objects
+ // transition to the STANDARD_IA or GLACIER storage class. If your bucket is
+ // versioning-enabled (or versioning is suspended), you can set this action
+ // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA
+ // or GLACIER storage class at a specific period in the object's lifetime.
+ NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"`
+
+ // Prefix identifying one or more objects to which the rule applies.
+ //
+ // Prefix is a required field
+ Prefix *string `type:"string" required:"true"`
+
+ // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
+ // is not currently being applied.
+ //
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
+
+ Transition *Transition `type:"structure"`
+}
+
+// String returns the string representation
+func (s Rule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Rule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Rule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Rule"}
+ if s.Prefix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Prefix"))
+ }
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value.
+func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule {
+ s.AbortIncompleteMultipartUpload = v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule {
+ s.Expiration = v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Rule) SetID(v string) *Rule {
+ s.ID = &v
+ return s
+}
+
+// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
+func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule {
+ s.NoncurrentVersionExpiration = v
+ return s
+}
+
+// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value.
+func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule {
+ s.NoncurrentVersionTransition = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *Rule) SetPrefix(v string) *Rule {
+ s.Prefix = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *Rule) SetStatus(v string) *Rule {
+ s.Status = &v
+ return s
+}
+
+// SetTransition sets the Transition field's value.
+func (s *Rule) SetTransition(v *Transition) *Rule {
+ s.Transition = v
+ return s
+}
+
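+// Illustrative sketch (not generated code): a lifecycle rule that expires
+// objects under tmp/ after 30 days, using the ExpirationStatus enum values
+// defined at the end of this file.
+//
+//	r := (&Rule{}).
+//		SetPrefix("tmp/").
+//		SetStatus(ExpirationStatusEnabled).
+//		SetExpiration((&LifecycleExpiration{}).SetDays(30))
+//	_ = r.Validate()
+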
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis
+type StorageClassAnalysis struct {
+ _ struct{} `type:"structure"`
+
+ // A container used to describe how data related to the storage class analysis
+ // should be exported.
+ DataExport *StorageClassAnalysisDataExport `type:"structure"`
+}
+
+// String returns the string representation
+func (s StorageClassAnalysis) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StorageClassAnalysis) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StorageClassAnalysis) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"}
+ if s.DataExport != nil {
+ if err := s.DataExport.Validate(); err != nil {
+ invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDataExport sets the DataExport field's value.
+func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis {
+ s.DataExport = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport
+type StorageClassAnalysisDataExport struct {
+ _ struct{} `type:"structure"`
+
+ // The place to store the data for an analysis.
+ //
+ // Destination is a required field
+ Destination *AnalyticsExportDestination `type:"structure" required:"true"`
+
+ // The version of the output schema to use when exporting data. Must be V_1.
+ //
+ // OutputSchemaVersion is a required field
+ OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"`
+}
+
+// String returns the string representation
+func (s StorageClassAnalysisDataExport) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StorageClassAnalysisDataExport) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StorageClassAnalysisDataExport) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"}
+ if s.Destination == nil {
+ invalidParams.Add(request.NewErrParamRequired("Destination"))
+ }
+ if s.OutputSchemaVersion == nil {
+ invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion"))
+ }
+ if s.Destination != nil {
+ if err := s.Destination.Validate(); err != nil {
+ invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDestination sets the Destination field's value.
+func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport {
+ s.Destination = v
+ return s
+}
+
+// SetOutputSchemaVersion sets the OutputSchemaVersion field's value.
+func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport {
+ s.OutputSchemaVersion = &v
+ return s
+}
+
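+// Illustrative sketch (not generated code): a storage class analysis that
+// exports CSV data to another bucket. The AnalyticsExportDestination wiring is
+// assumed from the corresponding types elsewhere in this file, and the bucket
+// ARN is a hypothetical placeholder.
+//
+//	dest := (&AnalyticsExportDestination{}).SetS3BucketDestination(
+//		(&AnalyticsS3BucketDestination{}).
+//			SetBucket("arn:aws:s3:::analysis-results").
+//			SetFormat(AnalyticsS3ExportFileFormatCsv))
+//	sca := (&StorageClassAnalysis{}).SetDataExport(
+//		(&StorageClassAnalysisDataExport{}).
+//			SetDestination(dest).
+//			SetOutputSchemaVersion(StorageClassAnalysisSchemaVersionV1))
+//	_ = sca.Validate()
+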
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag
+type Tag struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the tag.
+ //
+ // Key is a required field
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // Value of the tag.
+ //
+ // Value is a required field
+ Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tag) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Tag"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Value == nil {
+ invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *Tag) SetKey(v string) *Tag {
+ s.Key = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *Tag) SetValue(v string) *Tag {
+ s.Value = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging
+type Tagging struct {
+ _ struct{} `type:"structure"`
+
+ // TagSet is a required field
+ TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s Tagging) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tagging) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tagging) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Tagging"}
+ if s.TagSet == nil {
+ invalidParams.Add(request.NewErrParamRequired("TagSet"))
+ }
+ if s.TagSet != nil {
+ for i, v := range s.TagSet {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetTagSet sets the TagSet field's value.
+func (s *Tagging) SetTagSet(v []*Tag) *Tagging {
+ s.TagSet = v
+ return s
+}
+
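+// Illustrative sketch (not generated code): building a tag set and validating
+// it; Validate rejects an empty Key via the min-length check above.
+//
+//	t := (&Tag{}).SetKey("env").SetValue("prod")
+//	tagging := (&Tagging{}).SetTagSet([]*Tag{t})
+//	if err := tagging.Validate(); err != nil {
+//		// handle invalid parameters
+//	}
+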
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant
+type TargetGrant struct {
+ _ struct{} `type:"structure"`
+
+ Grantee *Grantee `type:"structure"`
+
+ // Logging permissions assigned to the Grantee for the bucket.
+ Permission *string `type:"string" enum:"BucketLogsPermission"`
+}
+
+// String returns the string representation
+func (s TargetGrant) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TargetGrant) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TargetGrant) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TargetGrant"}
+ if s.Grantee != nil {
+ if err := s.Grantee.Validate(); err != nil {
+ invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGrantee sets the Grantee field's value.
+func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant {
+ s.Grantee = v
+ return s
+}
+
+// SetPermission sets the Permission field's value.
+func (s *TargetGrant) SetPermission(v string) *TargetGrant {
+ s.Permission = &v
+ return s
+}
+
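+// Illustrative sketch (not generated code): granting the S3 log delivery group
+// write access on a logging target bucket. The Grantee field names are assumed
+// from the corresponding type elsewhere in this file.
+//
+//	tg := (&TargetGrant{}).
+//		SetGrantee((&Grantee{}).
+//			SetType("Group").
+//			SetURI("http://acs.amazonaws.com/groups/s3/LogDelivery")).
+//		SetPermission(BucketLogsPermissionWrite)
+//	_ = tg.Validate()
+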
+// Container for specifying the configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Notification Service (Amazon SNS) topic.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration
+type TopicConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Events is a required field
+ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+ // Container for object key name filtering rules. For information about key
+ // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ Filter *NotificationConfigurationFilter `type:"structure"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects
+ // events of the specified type.
+ //
+ // TopicArn is a required field
+ TopicArn *string `locationName:"Topic" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s TopicConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TopicConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TopicConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"}
+ if s.Events == nil {
+ invalidParams.Add(request.NewErrParamRequired("Events"))
+ }
+ if s.TopicArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("TopicArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEvents sets the Events field's value.
+func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration {
+ s.Events = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *TopicConfiguration) SetId(v string) *TopicConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetTopicArn sets the TopicArn field's value.
+func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration {
+ s.TopicArn = &v
+ return s
+}
+
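+// Illustrative sketch (not generated code): sending notifications for all
+// object-created events to an SNS topic. The topic ARN is a hypothetical
+// placeholder.
+//
+//	ev := EventS3ObjectCreated
+//	tc := (&TopicConfiguration{}).
+//		SetEvents([]*string{&ev}).
+//		SetTopicArn("arn:aws:sns:us-east-1:123456789012:my-topic")
+//	_ = tc.Validate()
+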
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated
+type TopicConfigurationDeprecated struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket event for which to send notifications.
+ Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ // Amazon SNS topic to which Amazon S3 will publish a message to report the
+ // specified events for the bucket.
+ Topic *string `type:"string"`
+}
+
+// String returns the string representation
+func (s TopicConfigurationDeprecated) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TopicConfigurationDeprecated) GoString() string {
+ return s.String()
+}
+
+// SetEvent sets the Event field's value.
+func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated {
+ s.Event = &v
+ return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated {
+ s.Events = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated {
+ s.Id = &v
+ return s
+}
+
+// SetTopic sets the Topic field's value.
+func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated {
+ s.Topic = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition
+type Transition struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates the date on which the object is to be moved or deleted. Should
+ // be in GMT ISO 8601 format.
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Indicates the lifetime, in days, of the objects that are subject to the rule.
+ // The value must be a non-zero positive integer.
+ Days *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"TransitionStorageClass"`
+}
+
+// String returns the string representation
+func (s Transition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Transition) GoString() string {
+ return s.String()
+}
+
+// SetDate sets the Date field's value.
+func (s *Transition) SetDate(v time.Time) *Transition {
+ s.Date = &v
+ return s
+}
+
+// SetDays sets the Days field's value.
+func (s *Transition) SetDays(v int64) *Transition {
+ s.Days = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Transition) SetStorageClass(v string) *Transition {
+ s.StorageClass = &v
+ return s
+}
+
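+// Illustrative sketch (not generated code): transitioning objects to GLACIER
+// 90 days after creation. The storage class string must be one of the
+// TransitionStorageClass enum values.
+//
+//	tr := (&Transition{}).SetDays(90).SetStorageClass("GLACIER")
+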
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest
+type UploadPartCopyInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The name of the source bucket and key name of the source object, separated
+ // by a slash (/). Must be URL-encoded.
+ //
+ // CopySource is a required field
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+ // Copies the object if it has been modified since the specified time.
+ CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Copies the object if its entity tag (ETag) is different than the specified
+ // ETag.
+ CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+ // Copies the object if it hasn't been modified since the specified time.
+ CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // The range of bytes to copy from the source object. The range value must use
+ // the form bytes=first-last, where first and last are the zero-based byte
+ // offsets to copy. For example, bytes=0-9 indicates that you want to copy the
+ // first ten bytes of the source. You can copy a range only if the source object
+ // is greater than 5 GB.
+ CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`
+
+ // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+ CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one
+ // that was used when the source object was created.
+ CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of part being copied. This is a positive integer between 1 and
+ // 10,000.
+ //
+ // PartNumber is a required field
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header. This must be the same encryption key specified in the initiate multipart
+ // upload request.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Upload ID identifying the multipart upload whose part is being copied.
+ //
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadPartCopyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartCopyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UploadPartCopyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.CopySource == nil {
+ invalidParams.Add(request.NewErrParamRequired("CopySource"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.PartNumber == nil {
+ invalidParams.Add(request.NewErrParamRequired("PartNumber"))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCopySource sets the CopySource field's value.
+func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput {
+ s.CopySource = &v
+ return s
+}
+
+// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput {
+ s.CopySourceIfMatch = &v
+ return s
+}
+
+// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput {
+ s.CopySourceIfModifiedSince = &v
+ return s
+}
+
+// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput {
+ s.CopySourceIfNoneMatch = &v
+ return s
+}
+
+// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput {
+ s.CopySourceIfUnmodifiedSince = &v
+ return s
+}
+
+// SetCopySourceRange sets the CopySourceRange field's value.
+func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput {
+ s.CopySourceRange = &v
+ return s
+}
+
+// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value.
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput {
+ s.CopySourceSSECustomerAlgorithm = &v
+ return s
+}
+
+// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value.
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput {
+ s.CopySourceSSECustomerKey = &v
+ return s
+}
+
+// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value.
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput {
+ s.CopySourceSSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput {
+ s.UploadId = &v
+ return s
+}
+
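+// Illustrative sketch (not generated code): copying the first 5 MiB of a
+// source object as part 1 of a multipart upload. The bucket, key, and upload
+// ID values are hypothetical placeholders.
+//
+//	in := (&UploadPartCopyInput{}).
+//		SetBucket("dest-bucket").
+//		SetKey("big-object").
+//		SetCopySource("src-bucket/big-object").
+//		SetCopySourceRange("bytes=0-5242879").
+//		SetPartNumber(1).
+//		SetUploadId("example-upload-id")
+//	if err := in.Validate(); err != nil {
+//		// handle invalid parameters before calling UploadPartCopy
+//	}
+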
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput
+type UploadPartCopyOutput struct {
+ _ struct{} `type:"structure" payload:"CopyPartResult"`
+
+ CopyPartResult *CopyPartResult `type:"structure"`
+
+ // The version of the source object that was copied, if you have enabled versioning
+ // on the source bucket.
+ CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation
+func (s UploadPartCopyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartCopyOutput) GoString() string {
+ return s.String()
+}
+
+// SetCopyPartResult sets the CopyPartResult field's value.
+func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput {
+ s.CopyPartResult = v
+ return s
+}
+
+// SetCopySourceVersionId sets the CopySourceVersionId field's value.
+func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput {
+ s.CopySourceVersionId = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest
+type UploadPartInput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ // Object data.
+ Body io.ReadSeeker `type:"blob"`
+
+ // Name of the bucket for which the multipart upload was initiated.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Size of the body in bytes. This parameter is useful when the size of the
+ // body cannot be determined automatically.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of part being uploaded. This is a positive integer between 1
+ // and 10,000.
+ //
+ // PartNumber is a required field
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header. This must be the same encryption key specified in the initiate multipart
+ // upload request.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Upload ID identifying the multipart upload whose part is being uploaded.
+ //
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadPartInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UploadPartInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.PartNumber == nil {
+ invalidParams.Add(request.NewErrParamRequired("PartNumber"))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBody sets the Body field's value.
+func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput {
+ s.Body = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *UploadPartInput) SetBucket(v string) *UploadPartInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *UploadPartInput) SetKey(v string) *UploadPartInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput {
+ s.UploadId = &v
+ return s
+}
+
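+// Illustrative sketch (not generated code): uploading part 1 from an in-memory
+// buffer; any io.ReadSeeker (e.g., bytes.NewReader) can serve as the Body. The
+// bucket, key, upload ID, and partData values are hypothetical placeholders.
+//
+//	in := (&UploadPartInput{}).
+//		SetBucket("my-bucket").
+//		SetKey("big-object").
+//		SetPartNumber(1).
+//		SetUploadId("example-upload-id").
+//		SetBody(bytes.NewReader(partData))
+//	_ = in.Validate()
+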
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput
+type UploadPartOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag for the uploaded object.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation
+func (s UploadPartOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartOutput) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration
+type VersioningConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+ // This element is only returned if the bucket has been configured with MFA
+ // delete. If the bucket has never been so configured, this element is not returned.
+ MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"`
+
+ // The versioning state of the bucket.
+ Status *string `type:"string" enum:"BucketVersioningStatus"`
+}
+
+// String returns the string representation
+func (s VersioningConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VersioningConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetMFADelete sets the MFADelete field's value.
+func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration {
+ s.MFADelete = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration {
+ s.Status = &v
+ return s
+}
+
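+// Illustrative sketch (not generated code): enabling versioning on a bucket
+// using the BucketVersioningStatus enum values defined at the end of this file.
+//
+//	vc := (&VersioningConfiguration{}).SetStatus(BucketVersioningStatusEnabled)
+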
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration
+type WebsiteConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ ErrorDocument *ErrorDocument `type:"structure"`
+
+ IndexDocument *IndexDocument `type:"structure"`
+
+ RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+
+ RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+}
+
+// String returns the string representation
+func (s WebsiteConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WebsiteConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *WebsiteConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"}
+ if s.ErrorDocument != nil {
+ if err := s.ErrorDocument.Validate(); err != nil {
+ invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.IndexDocument != nil {
+ if err := s.IndexDocument.Validate(); err != nil {
+ invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.RedirectAllRequestsTo != nil {
+ if err := s.RedirectAllRequestsTo.Validate(); err != nil {
+ invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.RoutingRules != nil {
+ for i, v := range s.RoutingRules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetErrorDocument sets the ErrorDocument field's value.
+func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration {
+ s.ErrorDocument = v
+ return s
+}
+
+// SetIndexDocument sets the IndexDocument field's value.
+func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration {
+ s.IndexDocument = v
+ return s
+}
+
+// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value.
+func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration {
+ s.RedirectAllRequestsTo = v
+ return s
+}
+
+// SetRoutingRules sets the RoutingRules field's value.
+func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration {
+ s.RoutingRules = v
+ return s
+}
+
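+// Illustrative sketch (not generated code): a static website configuration
+// with an index and error document plus the docs/ routing rule shown earlier.
+// The IndexDocument and ErrorDocument field names are assumed from the
+// corresponding types elsewhere in this file.
+//
+//	wc := (&WebsiteConfiguration{}).
+//		SetIndexDocument((&IndexDocument{}).SetSuffix("index.html")).
+//		SetErrorDocument((&ErrorDocument{}).SetKey("error.html"))
+//	if err := wc.Validate(); err != nil {
+//		// handle invalid parameters
+//	}
+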
+const (
+ // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value
+ AnalyticsS3ExportFileFormatCsv = "CSV"
+)
+
+const (
+ // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value
+ BucketAccelerateStatusEnabled = "Enabled"
+
+ // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value
+ BucketAccelerateStatusSuspended = "Suspended"
+)
+
+const (
+ // BucketCannedACLPrivate is a BucketCannedACL enum value
+ BucketCannedACLPrivate = "private"
+
+ // BucketCannedACLPublicRead is a BucketCannedACL enum value
+ BucketCannedACLPublicRead = "public-read"
+
+ // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value
+ BucketCannedACLPublicReadWrite = "public-read-write"
+
+ // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value
+ BucketCannedACLAuthenticatedRead = "authenticated-read"
+)
+
+const (
+ // BucketLocationConstraintEu is a BucketLocationConstraint enum value
+ BucketLocationConstraintEu = "EU"
+
+ // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintEuWest1 = "eu-west-1"
+
+ // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintUsWest1 = "us-west-1"
+
+ // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value
+ BucketLocationConstraintUsWest2 = "us-west-2"
+
+ // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApSouth1 = "ap-south-1"
+
+ // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApSoutheast1 = "ap-southeast-1"
+
+ // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApSoutheast2 = "ap-southeast-2"
+
+ // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApNortheast1 = "ap-northeast-1"
+
+ // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintSaEast1 = "sa-east-1"
+
+ // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintCnNorth1 = "cn-north-1"
+
+ // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintEuCentral1 = "eu-central-1"
+)
+
+const (
+ // BucketLogsPermissionFullControl is a BucketLogsPermission enum value
+ BucketLogsPermissionFullControl = "FULL_CONTROL"
+
+ // BucketLogsPermissionRead is a BucketLogsPermission enum value
+ BucketLogsPermissionRead = "READ"
+
+ // BucketLogsPermissionWrite is a BucketLogsPermission enum value
+ BucketLogsPermissionWrite = "WRITE"
+)
+
+const (
+ // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value
+ BucketVersioningStatusEnabled = "Enabled"
+
+ // BucketVersioningStatusSuspended is a BucketVersioningStatus enum value
+ BucketVersioningStatusSuspended = "Suspended"
+)
+
+// Requests Amazon S3 to encode the object keys in the response and specifies
+// the encoding method to use. An object key may contain any Unicode character;
+// however, an XML 1.0 parser cannot parse some characters, such as characters
+// with ASCII values from 0 to 10. For characters that are not supported in
+// XML 1.0, you can add this parameter to request that Amazon S3 encode the
+// keys in the response.
+const (
+ // EncodingTypeUrl is a EncodingType enum value
+ EncodingTypeUrl = "url"
+)
+
+// Bucket event for which to send notifications.
+const (
+ // EventS3ReducedRedundancyLostObject is a Event enum value
+ EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
+
+ // EventS3ObjectCreated is a Event enum value
+ EventS3ObjectCreated = "s3:ObjectCreated:*"
+
+ // EventS3ObjectCreatedPut is a Event enum value
+ EventS3ObjectCreatedPut = "s3:ObjectCreated:Put"
+
+ // EventS3ObjectCreatedPost is a Event enum value
+ EventS3ObjectCreatedPost = "s3:ObjectCreated:Post"
+
+ // EventS3ObjectCreatedCopy is a Event enum value
+ EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy"
+
+ // EventS3ObjectCreatedCompleteMultipartUpload is a Event enum value
+ EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
+
+ // EventS3ObjectRemoved is a Event enum value
+ EventS3ObjectRemoved = "s3:ObjectRemoved:*"
+
+ // EventS3ObjectRemovedDelete is a Event enum value
+ EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
+
+ // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value
+ EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
+)
+
+const (
+ // ExpirationStatusEnabled is a ExpirationStatus enum value
+ ExpirationStatusEnabled = "Enabled"
+
+ // ExpirationStatusDisabled is a ExpirationStatus enum value
+ ExpirationStatusDisabled = "Disabled"
+)
+
+const (
+ // FilterRuleNamePrefix is a FilterRuleName enum value
+ FilterRuleNamePrefix = "prefix"
+
+ // FilterRuleNameSuffix is a FilterRuleName enum value
+ FilterRuleNameSuffix = "suffix"
+)
+
+const (
+ // InventoryFormatCsv is a InventoryFormat enum value
+ InventoryFormatCsv = "CSV"
+)
+
+const (
+ // InventoryFrequencyDaily is a InventoryFrequency enum value
+ InventoryFrequencyDaily = "Daily"
+
+ // InventoryFrequencyWeekly is a InventoryFrequency enum value
+ InventoryFrequencyWeekly = "Weekly"
+)
+
+const (
+ // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value
+ InventoryIncludedObjectVersionsAll = "All"
+
+ // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value
+ InventoryIncludedObjectVersionsCurrent = "Current"
+)
+
+const (
+ // InventoryOptionalFieldSize is a InventoryOptionalField enum value
+ InventoryOptionalFieldSize = "Size"
+
+ // InventoryOptionalFieldLastModifiedDate is a InventoryOptionalField enum value
+ InventoryOptionalFieldLastModifiedDate = "LastModifiedDate"
+
+ // InventoryOptionalFieldStorageClass is a InventoryOptionalField enum value
+ InventoryOptionalFieldStorageClass = "StorageClass"
+
+ // InventoryOptionalFieldEtag is a InventoryOptionalField enum value
+ InventoryOptionalFieldEtag = "ETag"
+
+ // InventoryOptionalFieldIsMultipartUploaded is a InventoryOptionalField enum value
+ InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded"
+
+ // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value
+ InventoryOptionalFieldReplicationStatus = "ReplicationStatus"
+)
+
+const (
+ // MFADeleteEnabled is a MFADelete enum value
+ MFADeleteEnabled = "Enabled"
+
+ // MFADeleteDisabled is a MFADelete enum value
+ MFADeleteDisabled = "Disabled"
+)
+
+const (
+ // MFADeleteStatusEnabled is a MFADeleteStatus enum value
+ MFADeleteStatusEnabled = "Enabled"
+
+ // MFADeleteStatusDisabled is a MFADeleteStatus enum value
+ MFADeleteStatusDisabled = "Disabled"
+)
+
+const (
+ // MetadataDirectiveCopy is a MetadataDirective enum value
+ MetadataDirectiveCopy = "COPY"
+
+ // MetadataDirectiveReplace is a MetadataDirective enum value
+ MetadataDirectiveReplace = "REPLACE"
+)
+
+const (
+ // ObjectCannedACLPrivate is a ObjectCannedACL enum value
+ ObjectCannedACLPrivate = "private"
+
+ // ObjectCannedACLPublicRead is a ObjectCannedACL enum value
+ ObjectCannedACLPublicRead = "public-read"
+
+ // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value
+ ObjectCannedACLPublicReadWrite = "public-read-write"
+
+ // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value
+ ObjectCannedACLAuthenticatedRead = "authenticated-read"
+
+ // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value
+ ObjectCannedACLAwsExecRead = "aws-exec-read"
+
+ // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value
+ ObjectCannedACLBucketOwnerRead = "bucket-owner-read"
+
+ // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value
+ ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control"
+)
+
+const (
+ // ObjectStorageClassStandard is an ObjectStorageClass enum value
+ ObjectStorageClassStandard = "STANDARD"
+
+ // ObjectStorageClassReducedRedundancy is an ObjectStorageClass enum value
+ ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY"
+
+ // ObjectStorageClassGlacier is an ObjectStorageClass enum value
+ ObjectStorageClassGlacier = "GLACIER"
+)
+
+const (
+ // ObjectVersionStorageClassStandard is an ObjectVersionStorageClass enum value
+ ObjectVersionStorageClassStandard = "STANDARD"
+)
+
+const (
+ // PayerRequester is a Payer enum value
+ PayerRequester = "Requester"
+
+ // PayerBucketOwner is a Payer enum value
+ PayerBucketOwner = "BucketOwner"
+)
+
+const (
+ // PermissionFullControl is a Permission enum value
+ PermissionFullControl = "FULL_CONTROL"
+
+ // PermissionWrite is a Permission enum value
+ PermissionWrite = "WRITE"
+
+ // PermissionWriteAcp is a Permission enum value
+ PermissionWriteAcp = "WRITE_ACP"
+
+ // PermissionRead is a Permission enum value
+ PermissionRead = "READ"
+
+ // PermissionReadAcp is a Permission enum value
+ PermissionReadAcp = "READ_ACP"
+)
+
+const (
+ // ProtocolHttp is a Protocol enum value
+ ProtocolHttp = "http"
+
+ // ProtocolHttps is a Protocol enum value
+ ProtocolHttps = "https"
+)
+
+const (
+ // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value
+ ReplicationRuleStatusEnabled = "Enabled"
+
+ // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value
+ ReplicationRuleStatusDisabled = "Disabled"
+)
+
+const (
+ // ReplicationStatusComplete is a ReplicationStatus enum value
+ ReplicationStatusComplete = "COMPLETE"
+
+ // ReplicationStatusPending is a ReplicationStatus enum value
+ ReplicationStatusPending = "PENDING"
+
+ // ReplicationStatusFailed is a ReplicationStatus enum value
+ ReplicationStatusFailed = "FAILED"
+
+ // ReplicationStatusReplica is a ReplicationStatus enum value
+ ReplicationStatusReplica = "REPLICA"
+)
+
+// If present, indicates that the requester was successfully charged for the
+// request.
+const (
+ // RequestChargedRequester is a RequestCharged enum value
+ RequestChargedRequester = "requester"
+)
+
+// Confirms that the requester knows that she or he will be charged for the
+// request. Bucket owners need not specify this parameter in their requests.
+// Documentation on downloading objects from requester pays buckets can be found
+// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+const (
+ // RequestPayerRequester is a RequestPayer enum value
+ RequestPayerRequester = "requester"
+)
+
+const (
+ // ServerSideEncryptionAes256 is a ServerSideEncryption enum value
+ ServerSideEncryptionAes256 = "AES256"
+
+ // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value
+ ServerSideEncryptionAwsKms = "aws:kms"
+)
+
+const (
+ // StorageClassStandard is a StorageClass enum value
+ StorageClassStandard = "STANDARD"
+
+ // StorageClassReducedRedundancy is a StorageClass enum value
+ StorageClassReducedRedundancy = "REDUCED_REDUNDANCY"
+
+ // StorageClassStandardIa is a StorageClass enum value
+ StorageClassStandardIa = "STANDARD_IA"
+)
+
+const (
+ // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value
+ StorageClassAnalysisSchemaVersionV1 = "V_1"
+)
+
+const (
+ // TaggingDirectiveCopy is a TaggingDirective enum value
+ TaggingDirectiveCopy = "COPY"
+
+ // TaggingDirectiveReplace is a TaggingDirective enum value
+ TaggingDirectiveReplace = "REPLACE"
+)
+
+const (
+ // TierStandard is a Tier enum value
+ TierStandard = "Standard"
+
+ // TierBulk is a Tier enum value
+ TierBulk = "Bulk"
+
+ // TierExpedited is a Tier enum value
+ TierExpedited = "Expedited"
+)
+
+const (
+ // TransitionStorageClassGlacier is a TransitionStorageClass enum value
+ TransitionStorageClassGlacier = "GLACIER"
+
+ // TransitionStorageClassStandardIa is a TransitionStorageClass enum value
+ TransitionStorageClassStandardIa = "STANDARD_IA"
+)
+
+const (
+ // TypeCanonicalUser is a Type enum value
+ TypeCanonicalUser = "CanonicalUser"
+
+ // TypeAmazonCustomerByEmail is a Type enum value
+ TypeAmazonCustomerByEmail = "AmazonCustomerByEmail"
+
+ // TypeGroup is a Type enum value
+ TypeGroup = "Group"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
new file mode 100644
index 00000000..bc68a46a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
@@ -0,0 +1,106 @@
+package s3
+
+import (
+ "io/ioutil"
+ "regexp"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`)
+
+// NormalizeBucketLocation is a utility function which will update the
+// passed in value to always be a region ID. Generally this would be used
+// with the GetBucketLocation API operation.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+func NormalizeBucketLocation(loc string) string {
+ switch loc {
+ case "":
+ loc = "us-east-1"
+ case "EU":
+ loc = "eu-west-1"
+ }
+
+ return loc
+}
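+
+// A minimal usage sketch (illustrative only; any value other than "" or "EU"
+// is passed through unchanged):
+//
+// loc := NormalizeBucketLocation("") // "us-east-1"
+// loc = NormalizeBucketLocation("EU") // "eu-west-1"
+// loc = NormalizeBucketLocation("us-west-2") // "us-west-2", unchanged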
+
+// NormalizeBucketLocationHandler is a request handler which will update the
+// GetBucketLocation's result LocationConstraint value to always be a region ID.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+//
+// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{
+// Bucket: aws.String(bucket),
+// })
+// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
+// err := req.Send()
+var NormalizeBucketLocationHandler = request.NamedHandler{
+ Name: "awssdk.s3.NormalizeBucketLocation",
+ Fn: func(req *request.Request) {
+ if req.Error != nil {
+ return
+ }
+
+ out := req.Data.(*GetBucketLocationOutput)
+ loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint))
+ out.LocationConstraint = aws.String(loc)
+ },
+}
+
+// WithNormalizeBucketLocation is a request option which will update the
+// GetBucketLocation's result LocationConstraint value to always be a region ID.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+//
+// result, err := svc.GetBucketLocationWithContext(ctx,
+// &s3.GetBucketLocationInput{
+// Bucket: aws.String(bucket),
+// },
+// s3.WithNormalizeBucketLocation,
+// )
+func WithNormalizeBucketLocation(r *request.Request) {
+ r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
+}
+
+func buildGetBucketLocation(r *request.Request) {
+ if r.DataFilled() {
+ out := r.Data.(*GetBucketLocationOutput)
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed reading response body", err)
+ return
+ }
+
+ match := reBucketLocation.FindSubmatch(b)
+ if len(match) > 1 {
+ loc := string(match[1])
+ out.LocationConstraint = aws.String(loc)
+ }
+ }
+}
+
+func populateLocationConstraint(r *request.Request) {
+ if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" {
+ in := r.Params.(*CreateBucketInput)
+ if in.CreateBucketConfiguration == nil {
+ r.Params = awsutil.CopyOf(r.Params)
+ in = r.Params.(*CreateBucketInput)
+ in.CreateBucketConfiguration = &CreateBucketConfiguration{
+ LocationConstraint: r.Config.Region,
+ }
+ }
+ }
+}
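+
+// An illustrative sketch of the effect of populateLocationConstraint (the
+// client "svc" and bucket name are hypothetical; assume the client's region
+// is, for example, "eu-west-1"):
+//
+// // The Validate handler rewrites this input so that
+// // CreateBucketConfiguration.LocationConstraint is set to "eu-west-1".
+// _, err := svc.CreateBucket(&s3.CreateBucketInput{
+// Bucket: aws.String("my-bucket"),
+// })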
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go
new file mode 100644
index 00000000..9fc5df94
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go
@@ -0,0 +1,36 @@
+package s3
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
+// require it.
+func contentMD5(r *request.Request) {
+ h := md5.New()
+
+ // hash the body. seek back to the first position after reading to reset
+ // the body for transmission. copy errors may be assumed to be from the
+ // body.
+ _, err := io.Copy(h, r.Body)
+ if err != nil {
+ r.Error = awserr.New("ContentMD5", "failed to read body", err)
+ return
+ }
+ _, err = r.Body.Seek(0, 0)
+ if err != nil {
+ r.Error = awserr.New("ContentMD5", "failed to seek body", err)
+ return
+ }
+
+ // encode the md5 checksum in base64 and set the request header.
+ sum := h.Sum(nil)
+ sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
+ base64.StdEncoding.Encode(sum64, sum)
+ r.HTTPRequest.Header.Set("Content-MD5", string(sum64))
+}
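+
+// For reference, the header value set above is equivalent to this standalone
+// sketch over an in-memory payload (names are illustrative):
+//
+// sum := md5.Sum(payload) // payload is a []byte
+// hdr := base64.StdEncoding.EncodeToString(sum[:])
+// httpReq.Header.Set("Content-MD5", hdr)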
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
new file mode 100644
index 00000000..84633472
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
@@ -0,0 +1,46 @@
+package s3
+
+import (
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+func init() {
+ initClient = defaultInitClientFn
+ initRequest = defaultInitRequestFn
+}
+
+func defaultInitClientFn(c *client.Client) {
+ // Support building custom endpoints based on config
+ c.Handlers.Build.PushFront(updateEndpointForS3Config)
+
+ // Require SSL when using SSE keys
+ c.Handlers.Validate.PushBack(validateSSERequiresSSL)
+ c.Handlers.Build.PushBack(computeSSEKeys)
+
+ // S3 uses custom error unmarshaling logic
+ c.Handlers.UnmarshalError.Clear()
+ c.Handlers.UnmarshalError.PushBack(unmarshalError)
+}
+
+func defaultInitRequestFn(r *request.Request) {
+ // Add request handlers for specific platforms.
+ // e.g. 100-continue support for PUT requests using Go 1.6
+ platformRequestHandlers(r)
+
+ switch r.Operation.Name {
+ case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
+ opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
+ opPutBucketReplication:
+ // These S3 operations require Content-MD5 to be set
+ r.Handlers.Build.PushBack(contentMD5)
+ case opGetBucketLocation:
+ // GetBucketLocation has custom parsing logic
+ r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
+ case opCreateBucket:
+ // Auto-populate LocationConstraint with current region
+ r.Handlers.Validate.PushFront(populateLocationConstraint)
+ case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
+ r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarshalError)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
new file mode 100644
index 00000000..f045fd0d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
@@ -0,0 +1,78 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package s3 provides the client and types for making API
+// requests to Amazon Simple Storage Service.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service.
+//
+// See s3 package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
+//
+// Using the Client
+//
+// To use the client for Amazon Simple Storage Service you will first need
+// to create a new instance of it.
+//
+// When creating a client for an AWS service you'll first need to have a Session
+// already created. The Session provides configuration that can be shared
+// between multiple service clients. Additional configuration can be applied to
+// the Session and service's client when they are constructed. The aws package's
+// Config type contains several fields such as Region for the AWS Region the
+// client should make API requests to. The optional Config value can be provided
+// as the variadic argument for Sessions and client creation.
+//
+// Once the service's client is created you can use it to make API requests to
+// the AWS service. These clients are safe to use concurrently.
+//
+// // Create a session to share configuration, and load external configuration.
+// sess := session.Must(session.NewSession())
+//
+// // Create the service's client with the session.
+// svc := s3.New(sess)
+//
+// See the SDK's documentation for more information on how to use service clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws package's Config type for more information on configuration options.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon Simple Storage Service client S3 for more
+// information on creating the service's client.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
+//
+// Once the client is created you can make an API request to the service.
+// Each API method takes an input parameter, and returns the service response
+// and an error.
+//
+// The API method will document which error codes the service can return for
+// the operation if the service models the API operation's errors. These
+// errors will also be available as const strings prefixed with "ErrCode".
+//
+// result, err := svc.AbortMultipartUpload(params)
+// if err != nil {
+// // Cast err to awserr.Error to handle specific error codes.
+// aerr, ok := err.(awserr.Error)
+// if ok && aerr.Code() == <error code to check for> {
+// // Specific error code handling
+// }
+// return err
+// }
+//
+// fmt.Println("AbortMultipartUpload result:")
+// fmt.Println(result)
+//
+// Using the Client with Context
+//
+// The service's client also provides methods to make API requests with a Context
+// value. This allows you to control the timeout, and cancellation of pending
+// requests. These methods also take request Option as variadic parameter to apply
+// additional configuration to the API request.
+//
+// ctx := context.Background()
+//
+// result, err := svc.AbortMultipartUploadWithContext(ctx, params)
+//
+// See the request package documentation for more information on using Context pattern
+// with the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
new file mode 100644
index 00000000..b794a63b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
@@ -0,0 +1,109 @@
+// Upload Managers
+//
+// The s3manager package's Uploader provides concurrent upload of content to S3
+// by taking advantage of S3's Multipart APIs. The Uploader supports io.Reader
+// for streaming uploads, and will also take advantage of io.ReadSeeker for
+// optimizations if the Body satisfies that interface. Once the Uploader instance
+// is created you can call Upload concurrently from multiple goroutines safely.
+//
+// // The session the S3 Uploader will use
+// sess := session.Must(session.NewSession())
+//
+// // Create an uploader with the session and default options
+// uploader := s3manager.NewUploader(sess)
+//
+// f, err := os.Open(filename)
+// if err != nil {
+// return fmt.Errorf("failed to open file %q, %v", filename, err)
+// }
+//
+// // Upload the file to S3.
+// result, err := uploader.Upload(&s3manager.UploadInput{
+// Bucket: aws.String(myBucket),
+// Key: aws.String(myString),
+// Body: f,
+// })
+// if err != nil {
+// return fmt.Errorf("failed to upload file, %v", err)
+// }
+// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
+//
+// See the s3manager package's Uploader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
+//
+// Download Manager
+//
+// The s3manager package's Downloader provides concurrent downloading of Objects
+// from S3. The Downloader will write S3 Object content with an io.WriterAt.
+// Once the Downloader instance is created you can call Download concurrently from
+// multiple goroutines safely.
+//
+// // The session the S3 Downloader will use
+// sess := session.Must(session.NewSession())
+//
+// // Create a downloader with the session and default options
+// downloader := s3manager.NewDownloader(sess)
+//
+// // Create a file to write the S3 Object contents to.
+// f, err := os.Create(filename)
+// if err != nil {
+// return fmt.Errorf("failed to create file %q, %v", filename, err)
+// }
+//
+// // Write the contents of S3 Object to the file
+// n, err := downloader.Download(f, &s3.GetObjectInput{
+// Bucket: aws.String(myBucket),
+// Key: aws.String(myString),
+// })
+// if err != nil {
+// return fmt.Errorf("failed to upload file, %v", err)
+// }
+// fmt.Printf("file downloaded, %d bytes\n", n)
+//
+// See the s3manager package's Downloader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
+//
+// Get Bucket Region
+//
+// GetBucketRegion will attempt to get the region for a bucket using a region
+// hint to determine which AWS partition to perform the query on. Use this utility
+// to determine the region a bucket is in.
+//
+// sess := session.Must(session.NewSession())
+//
+// bucket := "my-bucket"
+// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
+// if err != nil {
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+// }
+// return err
+// }
+// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+// See the s3manager package's GetBucketRegion function documentation for more information
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
+//
+// S3 Crypto Client
+//
+// The s3crypto package provides the tools to upload and download encrypted
+// content from S3. The Encryption and Decryption clients can be used concurrently
+// once the client is created.
+//
+// sess := session.Must(session.NewSession())
+//
+// // Create the decryption client.
+// svc := s3crypto.NewDecryptionClient(sess)
+//
+// // The object will be downloaded from S3 and decrypted locally. The metadata
+// // about the object's encryption will instruct the decryption client how to
+// // decrypt the content of the object. By default KMS is used for keys.
+// result, err := svc.GetObject(&s3.GetObjectInput {
+// Bucket: aws.String(myBucket),
+// Key: aws.String(myKey),
+// })
+//
+// See the s3crypto package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
+//
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
new file mode 100644
index 00000000..931cb17b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
@@ -0,0 +1,48 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+const (
+
+ // ErrCodeBucketAlreadyExists for service response error code
+ // "BucketAlreadyExists".
+ //
+ // The requested bucket name is not available. The bucket namespace is shared
+ // by all users of the system. Please select a different name and try again.
+ ErrCodeBucketAlreadyExists = "BucketAlreadyExists"
+
+ // ErrCodeBucketAlreadyOwnedByYou for service response error code
+ // "BucketAlreadyOwnedByYou".
+ ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
+
+ // ErrCodeNoSuchBucket for service response error code
+ // "NoSuchBucket".
+ //
+ // The specified bucket does not exist.
+ ErrCodeNoSuchBucket = "NoSuchBucket"
+
+ // ErrCodeNoSuchKey for service response error code
+ // "NoSuchKey".
+ //
+ // The specified key does not exist.
+ ErrCodeNoSuchKey = "NoSuchKey"
+
+ // ErrCodeNoSuchUpload for service response error code
+ // "NoSuchUpload".
+ //
+ // The specified multipart upload does not exist.
+ ErrCodeNoSuchUpload = "NoSuchUpload"
+
+ // ErrCodeObjectAlreadyInActiveTierError for service response error code
+ // "ObjectAlreadyInActiveTierError".
+ //
+ // This operation is not allowed against this storage tier
+ ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
+
+ // ErrCodeObjectNotInActiveTierError for service response error code
+ // "ObjectNotInActiveTierError".
+ //
+ // The source object of the COPY operation is not in the active tier and is
+ // only stored in Amazon Glacier.
+ ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
+)
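+
+// A minimal sketch of matching these codes at a call site (assumes an
+// existing client "svc" and bucket/key variables; illustrative only):
+//
+// _, err := svc.GetObject(&s3.GetObjectInput{
+// Bucket: aws.String(bucket),
+// Key: aws.String(key),
+// })
+// if aerr, ok := err.(awserr.Error); ok {
+// switch aerr.Code() {
+// case s3.ErrCodeNoSuchBucket:
+// // Handle a missing bucket.
+// case s3.ErrCodeNoSuchKey:
+// // Handle a missing object key.
+// }
+// }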
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
new file mode 100644
index 00000000..ec3ffe44
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -0,0 +1,162 @@
+package s3
+
+import (
+ "fmt"
+ "net/url"
+ "regexp"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// operationBlacklist is a list of operation names that a request
+// handler should not be executed with.
+type operationBlacklist []string
+
+// Continue will return true if the Request's operation name is not
+// in the blacklist. False otherwise.
+func (b operationBlacklist) Continue(r *request.Request) bool {
+ for i := 0; i < len(b); i++ {
+ if b[i] == r.Operation.Name {
+ return false
+ }
+ }
+ return true
+}
+
+var accelerateOpBlacklist = operationBlacklist{
+ opListBuckets, opCreateBucket, opDeleteBucket,
+}
+
+// Request handler to automatically add the bucket name to the endpoint domain
+// if possible. This style of bucket is valid for all bucket names which are
+// DNS compatible and do not contain ".".
+func updateEndpointForS3Config(r *request.Request) {
+ forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
+ accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
+
+ if accelerate && accelerateOpBlacklist.Continue(r) {
+ if forceHostStyle {
+ if r.Config.Logger != nil {
+ r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
+ }
+ }
+ updateEndpointForAccelerate(r)
+ } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
+ updateEndpointForHostStyle(r)
+ }
+}
+
+func updateEndpointForHostStyle(r *request.Request) {
+ bucket, ok := bucketNameFromReqParams(r.Params)
+ if !ok {
+ // Ignore operation requests if the bucket name was not provided;
+ // if this is an input validation error the validation handler
+ // will report it.
+ return
+ }
+
+ if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+ // bucket name must be valid to put into the host
+ return
+ }
+
+ moveBucketToHost(r.HTTPRequest.URL, bucket)
+}
+
+var (
+ accelElem = []byte("s3-accelerate.dualstack.")
+)
+
+func updateEndpointForAccelerate(r *request.Request) {
+ bucket, ok := bucketNameFromReqParams(r.Params)
+ if !ok {
+ // Ignore operation requests if the bucket name was not provided;
+ // if this is an input validation error the validation handler
+ // will report it.
+ return
+ }
+
+ if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+ r.Error = awserr.New("InvalidParameterException",
+ fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket),
+ nil)
+ return
+ }
+
+ parts := strings.Split(r.HTTPRequest.URL.Host, ".")
+ if len(parts) < 3 {
+ r.Error = awserr.New("InvalidParameterException",
+ fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s",
+ r.HTTPRequest.URL.Host), nil)
+ return
+ }
+
+ if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
+ parts[0] = "s3-accelerate"
+ }
+ for i := 1; i+1 < len(parts); i++ {
+ if parts[i] == aws.StringValue(r.Config.Region) {
+ parts = append(parts[:i], parts[i+1:]...)
+ break
+ }
+ }
+
+ r.HTTPRequest.URL.Host = strings.Join(parts, ".")
+
+ moveBucketToHost(r.HTTPRequest.URL, bucket)
+}
+
+// Attempts to retrieve the bucket name from the request input parameters.
+// If no bucket is found, or the field is empty "", false will be returned.
+func bucketNameFromReqParams(params interface{}) (string, bool) {
+ b, _ := awsutil.ValuesAtPath(params, "Bucket")
+ if len(b) == 0 {
+ return "", false
+ }
+
+ if bucket, ok := b[0].(*string); ok {
+ if bucketStr := aws.StringValue(bucket); bucketStr != "" {
+ return bucketStr, true
+ }
+ }
+
+ return "", false
+}
+
+// hostCompatibleBucketName returns true if the request should
+// put the bucket in the host. This is false if S3ForcePathStyle is
+// explicitly set or if the bucket is not DNS compatible.
+func hostCompatibleBucketName(u *url.URL, bucket string) bool {
+ // Bucket might be DNS compatible but dots in the hostname will fail
+ // certificate validation, so do not use host-style.
+ if u.Scheme == "https" && strings.Contains(bucket, ".") {
+ return false
+ }
+
+ // if the bucket is DNS compatible
+ return dnsCompatibleBucketName(bucket)
+}
+
+var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
+func dnsCompatibleBucketName(bucket string) bool {
+ return reDomain.MatchString(bucket) &&
+ !reIPAddress.MatchString(bucket) &&
+ !strings.Contains(bucket, "..")
+}
+
+// moveBucketToHost moves the bucket name from the URI path to URL host.
+func moveBucketToHost(u *url.URL, bucket string) {
+ u.Host = bucket + "." + u.Host
+ u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
+ if u.Path == "" {
+ u.Path = "/"
+ }
+}
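+
+// An illustrative sketch of the rewrite performed by moveBucketToHost (the
+// URL and bucket name are example values):
+//
+// u, _ := url.Parse("https://s3.us-west-2.amazonaws.com/{Bucket}")
+// moveBucketToHost(u, "my-bucket")
+// // u.Host == "my-bucket.s3.us-west-2.amazonaws.com"
+// // u.Path == "/"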
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
new file mode 100644
index 00000000..8e6f3307
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
@@ -0,0 +1,8 @@
+// +build !go1.6
+
+package s3
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+func platformRequestHandlers(r *request.Request) {
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
new file mode 100644
index 00000000..14d05f7b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
@@ -0,0 +1,28 @@
+// +build go1.6
+
+package s3
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+func platformRequestHandlers(r *request.Request) {
+ if r.Operation.HTTPMethod == "PUT" {
+ // 100-Continue should only be used on put requests.
+ r.Handlers.Sign.PushBack(add100Continue)
+ }
+}
+
+func add100Continue(r *request.Request) {
+ if aws.BoolValue(r.Config.S3Disable100Continue) {
+ return
+ }
+ if r.HTTPRequest.ContentLength < 1024*1024*2 {
+ // Ignore requests smaller than 2MB. This helps prevent delaying
+ // requests unnecessarily.
+ return
+ }
+
+ r.HTTPRequest.Header.Set("Expect", "100-Continue")
+}
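+
+// A minimal sketch of disabling the 100-Continue behavior through
+// configuration (the session setup is illustrative):
+//
+// sess := session.Must(session.NewSession(&aws.Config{
+// S3Disable100Continue: aws.Bool(true),
+// }))
+// svc := s3.New(sess)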
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
new file mode 100644
index 00000000..614e477d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -0,0 +1,93 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/restxml"
+)
+
+// S3 provides the API operation methods for making requests to
+// Amazon Simple Storage Service. See this package's package overview docs
+// for details on the service.
+//
+// S3 methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type S3 struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "s3" // Service endpoint prefix API calls made to.
+ EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+)
+
+// New creates a new instance of the S3 client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a S3 client from just a session.
+// svc := s3.New(mySession)
+//
+// // Create a S3 client with additional configuration
+// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 {
+ svc := &S3{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2006-03-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a S3 operation and runs any
+// custom request initialization.
+func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
new file mode 100644
index 00000000..268ea2fb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
@@ -0,0 +1,44 @@
+package s3
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
+
+func validateSSERequiresSSL(r *request.Request) {
+ if r.HTTPRequest.URL.Scheme != "https" {
+ p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey")
+ if len(p) > 0 {
+ r.Error = errSSERequiresSSL
+ }
+ }
+}
+
+func computeSSEKeys(r *request.Request) {
+ headers := []string{
+ "x-amz-server-side-encryption-customer-key",
+ "x-amz-copy-source-server-side-encryption-customer-key",
+ }
+
+ for _, h := range headers {
+ md5h := h + "-md5"
+ if key := r.HTTPRequest.Header.Get(h); key != "" {
+ // Base64-encode the value
+ b64v := base64.StdEncoding.EncodeToString([]byte(key))
+ r.HTTPRequest.Header.Set(h, b64v)
+
+ // Add MD5 if it wasn't computed
+ if r.HTTPRequest.Header.Get(md5h) == "" {
+ sum := md5.Sum([]byte(key))
+ b64sum := base64.StdEncoding.EncodeToString(sum[:])
+ r.HTTPRequest.Header.Set(md5h, b64sum)
+ }
+ }
+ }
+}
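+
+// A minimal SSE-C usage sketch (bucket, key, and key material are
+// illustrative; the handlers above base64-encode the key, add the MD5
+// header, and reject the request if it is not sent over HTTPS):
+//
+// _, err := svc.PutObject(&s3.PutObjectInput{
+// Bucket: aws.String("my-bucket"),
+// Key: aws.String("my-key"),
+// Body: bytes.NewReader(data),
+// SSECustomerAlgorithm: aws.String("AES256"),
+// SSECustomerKey: aws.String(string(rawKey)), // 32 bytes of key material
+// })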
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
new file mode 100644
index 00000000..5a78fd33
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
@@ -0,0 +1,35 @@
+package s3
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+func copyMultipartStatusOKUnmarshalError(r *request.Request) {
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "unable to read response body", err)
+ return
+ }
+ body := bytes.NewReader(b)
+ r.HTTPResponse.Body = ioutil.NopCloser(body)
+ defer body.Seek(0, 0)
+
+ if body.Len() == 0 {
+ // If there is no body don't attempt to parse the body.
+ return
+ }
+
+ unmarshalError(r)
+ if err, ok := r.Error.(awserr.Error); ok && err != nil {
+ if err.Code() == "SerializationError" {
+ r.Error = nil
+ return
+ }
+ r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
new file mode 100644
index 00000000..bcca8627
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
@@ -0,0 +1,103 @@
+package s3
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+type xmlErrorResponse struct {
+ XMLName xml.Name `xml:"Error"`
+ Code string `xml:"Code"`
+ Message string `xml:"Message"`
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+
+ hostID := r.HTTPResponse.Header.Get("X-Amz-Id-2")
+
+ // Bucket exists in a different region, and request needs
+ // to be made to the correct region.
+ if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
+ r.Error = requestFailure{
+ RequestFailure: awserr.NewRequestFailure(
+ awserr.New("BucketRegionError",
+ fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
+ aws.StringValue(r.Config.Region)),
+ nil),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ ),
+ hostID: hostID,
+ }
+ return
+ }
+
+ var errCode, errMsg string
+
+ // Attempt to parse error from body if it is known
+ resp := &xmlErrorResponse{}
+ err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
+ if err != nil && err != io.EOF {
+ errCode = "SerializationError"
+ errMsg = "failed to decode S3 XML error response"
+ } else {
+ errCode = resp.Code
+ errMsg = resp.Message
+ err = nil
+ }
+
+ // Fallback to status code converted to message if still no error code
+ if len(errCode) == 0 {
+ statusText := http.StatusText(r.HTTPResponse.StatusCode)
+ errCode = strings.Replace(statusText, " ", "", -1)
+ errMsg = statusText
+ }
+
+ r.Error = requestFailure{
+ RequestFailure: awserr.NewRequestFailure(
+ awserr.New(errCode, errMsg, err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ ),
+ hostID: hostID,
+ }
+}
+
+// A RequestFailure provides access to the S3 Request ID and Host ID values
+// returned from API operation errors. Getting the error as a string will
+// return the formatted error with the same information as awserr.RequestFailure,
+// while also adding the HostID value from the response.
+type RequestFailure interface {
+ awserr.RequestFailure
+
+ // HostID is the S3 Host ID needed for debugging and for contacting support.
+ HostID() string
+}
+
+type requestFailure struct {
+ awserr.RequestFailure
+
+ hostID string
+}
+
+func (r requestFailure) Error() string {
+ extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+ r.StatusCode(), r.RequestID(), r.hostID)
+ return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r requestFailure) String() string {
+ return r.Error()
+}
+func (r requestFailure) HostID() string {
+ return r.hostID
+}
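+
+// A minimal sketch of retrieving the Host ID from a failed call (the error
+// value "err" is illustrative):
+//
+// if reqErr, ok := err.(s3.RequestFailure); ok {
+// fmt.Println("request id:", reqErr.RequestID())
+// fmt.Println("host id:", reqErr.HostID())
+// }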
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
new file mode 100644
index 00000000..cccfa8c2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
@@ -0,0 +1,214 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// WaitUntilBucketExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window an error will
+// be returned.
+func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
+ return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilBucketExists",
+ MaxAttempts: 20,
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 200,
+ },
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 301,
+ },
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 403,
+ },
+ {
+ State: request.RetryWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 404,
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadBucketInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadBucketRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
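+
+// A minimal waiter usage sketch (context, bucket name, and option value are
+// illustrative):
+//
+// err := svc.WaitUntilBucketExistsWithContext(ctx,
+// &s3.HeadBucketInput{Bucket: aws.String("my-bucket")},
+// request.WithWaiterMaxAttempts(10),
+// )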
+
+// WaitUntilBucketNotExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window an error will
+// be returned.
+func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
+ return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilBucketNotExists",
+ MaxAttempts: 20,
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 404,
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadBucketInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadBucketRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
+
+// WaitUntilObjectExists uses the Amazon S3 API operation
+// HeadObject to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window an error will
+// be returned.
+func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
+ return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilObjectExists",
+ MaxAttempts: 20,
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 200,
+ },
+ {
+ State: request.RetryWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 404,
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadObjectInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadObjectRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
+
+// WaitUntilObjectNotExists uses the Amazon S3 API operation
+// HeadObject to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window an error will
+// be returned.
+func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
+ return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilObjectNotExists",
+ MaxAttempts: 20,
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 404,
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadObjectInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadObjectRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
new file mode 100644
index 00000000..2de65288
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -0,0 +1,2365 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAssumeRole = "AssumeRole"
+
+// AssumeRoleRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRole operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See AssumeRole for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the AssumeRole method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the AssumeRoleRequest method.
+// req, resp := client.AssumeRoleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
+ op := &request.Operation{
+ Name: opAssumeRole,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleInput{}
+ }
+
+ output = &AssumeRoleOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AssumeRole API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) that you can use to access
+// AWS resources that you might not normally have access to. Typically, you
+// use AssumeRole for cross-account access or federation. For a comparison of
+// AssumeRole with the other APIs that produce temporary credentials, see Requesting
+// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// Important: You cannot call AssumeRole by using AWS root account credentials;
+// access is denied. You must use credentials for an IAM user or an IAM role
+// to call AssumeRole.
+//
+// For cross-account access, imagine that you own multiple accounts and need
+// to access resources in each account. You could create long-term credentials
+// in each account to access those resources. However, managing all those credentials
+// and remembering which one can access which account can be time consuming.
+// Instead, you can create one set of long-term credentials in one account and
+// then use temporary security credentials to access all the other accounts
+// by assuming roles in those accounts. For more information about roles, see
+// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html)
+// in the IAM User Guide.
+//
+// For federation, you can, for example, grant single sign-on access to the
+// AWS Management Console. If you already have an identity and authentication
+// system in your corporate network, you don't have to recreate user identities
+// in AWS in order to grant those user identities access to AWS. Instead, after
+// a user has been authenticated, you call AssumeRole (and specify the role
+// with the appropriate permissions) to get temporary security credentials for
+// that user. With those temporary security credentials, you construct a sign-in
+// URL that users can use to access the console. For more information, see Common
+// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction)
+// in the IAM User Guide.
+//
+// The temporary security credentials are valid for the duration that you specified
+// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a
+// maximum of 3600 seconds (1 hour). The default is 1 hour.
+//
+// The temporary security credentials created by AssumeRole can be used to make
+// API calls to any AWS service with the following exception: you cannot call
+// the STS service's GetFederationToken or GetSessionToken APIs.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by both the access policy of the role that
+// is being assumed, and the policy that you pass. This gives you a way to further
+// restrict the permissions for the resulting temporary security credentials.
+// You cannot use the passed policy to grant permissions that are in excess
+// of those allowed by the access policy of the role that is being assumed.
+// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the IAM User Guide.
+//
+// To assume a role, your AWS account must be trusted by the role. The trust
+// relationship is defined in the role's trust policy when the role is created.
+// That trust policy states which accounts are allowed to delegate access to
+// this account's role.
+//
+// The user who wants to access the role must also have permissions delegated
+// from the role's administrator. If the user is in a different account than
+// the role, then the user's administrator must attach a policy that allows
+// the user to call AssumeRole on the ARN of the role in the other account.
+// If the user is in the same account as the role, then you can either attach
+// a policy to the user (identical to the previous different account user),
+// or you can add the user as a principal directly in the role's trust policy.
+//
+// Using MFA with AssumeRole
+//
+// You can optionally include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios in which
+// you want to make sure that the user who is assuming the role has been authenticated
+// using an AWS MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication; if the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication
+// might look like the following example.
+//
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA device produces.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRole for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
+ req, out := c.AssumeRoleRequest(input)
+ return out, req.Send()
+}
+
+// AssumeRoleWithContext is the same as AssumeRole with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) {
+ req, out := c.AssumeRoleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
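+
+// A minimal AssumeRole usage sketch (role ARN, session name, and duration
+// are illustrative values):
+//
+// svc := sts.New(sess)
+// out, err := svc.AssumeRole(&sts.AssumeRoleInput{
+// RoleArn: aws.String("arn:aws:iam::123456789012:role/example"),
+// RoleSessionName: aws.String("example-session"),
+// DurationSeconds: aws.Int64(900),
+// })
+// if err == nil {
+// fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
+// }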
+
+const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
+
+// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithSAML operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See AssumeRoleWithSAML for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the AssumeRoleWithSAML method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the AssumeRoleWithSAMLRequest method.
+// req, resp := client.AssumeRoleWithSAMLRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
+ op := &request.Operation{
+ Name: opAssumeRoleWithSAML,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleWithSAMLInput{}
+ }
+
+ output = &AssumeRoleWithSAMLOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AssumeRoleWithSAML API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// via a SAML authentication response. This operation provides a mechanism for
+// tying an enterprise identity store or directory to role-based AWS access
+// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this operation consist of
+// an access key ID, a secret access key, and a security token. Applications
+// can use these temporary security credentials to sign calls to AWS services.
+//
+// The temporary security credentials are valid for the duration that you specified
+// when calling AssumeRole, or until the time specified in the SAML authentication
+// response's SessionNotOnOrAfter value, whichever is shorter. The duration
+// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour).
+// The default is 1 hour.
+//
+// The temporary security credentials created by AssumeRoleWithSAML can be used
+// to make API calls to any AWS service with the following exception: you cannot
+// call the STS service's GetFederationToken or GetSessionToken APIs.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by the intersection of both the access policy
+// of the role that is being assumed, and the policy that you pass. This means
+// that both policies must grant the permission for the action to be allowed.
+// This gives you a way to further restrict the permissions for the resulting
+// temporary security credentials. You cannot use the passed policy to grant
+// permissions that are in excess of those allowed by the access policy of the
+// role that is being assumed. For more information, see Permissions for AssumeRole,
+// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the IAM User Guide.
+//
+// Before your application can call AssumeRoleWithSAML, you must configure your
+// SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
+// you must use AWS Identity and Access Management (IAM) to create a SAML provider
+// entity in your AWS account that represents your identity provider, and create
+// an IAM role that specifies this SAML provider in its trust policy.
+//
+// Calling AssumeRoleWithSAML does not require the use of AWS security credentials.
+// The identity of the caller is validated by using keys in the metadata document
+// that is uploaded for the SAML provider entity for your identity provider.
+//
+// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail
+// logs. The entry includes the value in the NameID element of the SAML assertion.
+// We recommend that you use a NameIDType that is not associated with any personally
+// identifiable information (PII). For example, you could instead use the Persistent
+// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
+//
+// For more information, see the following resources:
+//
+// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
+// in the IAM User Guide.
+//
+// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
+// in the IAM User Guide.
+//
+// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
+// in the IAM User Guide.
+//
+// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRoleWithSAML for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
+// The identity provider (IdP) reported that authentication failed. This might
+// be because the claim is invalid.
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it
+// can also mean that the claim has expired or has been explicitly revoked.
+//
+// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+// The web identity token that was passed could not be validated by AWS. Get
+// a new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeExpiredTokenException "ExpiredTokenException"
+// The web identity token that was passed is expired or is not valid. Get a
+// new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
+ req, out := c.AssumeRoleWithSAMLRequest(input)
+ return out, req.Send()
+}
+
+// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithSAML for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) {
+ req, out := c.AssumeRoleWithSAMLRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
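+
+// Editor's note: the following is an illustrative sketch, not generated API
+// documentation. It assumes svc is an *STS client (for example, created with
+// sts.New(session.Must(session.NewSession()))) and that samlAssertion holds
+// the base64-encoded SAML response obtained from your IdP; the ARNs are
+// placeholders.
+//
+// out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
+//     RoleArn:       aws.String("arn:aws:iam::123456789012:role/SAMLRole"),
+//     PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/MyIdP"),
+//     SAMLAssertion: aws.String(samlAssertion),
+// })
+// if err != nil {
+//     return err // use awserr.Error type assertions to inspect the error code
+// }
+// fmt.Println(out.Credentials) // AccessKeyId, SecretAccessKey, SessionToken, Expiration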
+
+const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
+
+// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See AssumeRoleWithWebIdentity for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you only want
+// the service response, call the AssumeRoleWithWebIdentity method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
+// req, resp := client.AssumeRoleWithWebIdentityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
+ op := &request.Operation{
+ Name: opAssumeRoleWithWebIdentity,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleWithWebIdentityInput{}
+ }
+
+ output = &AssumeRoleWithWebIdentityOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// in a mobile or web application with a web identity provider, such as Amazon
+// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible
+// identity provider.
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You can
+// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/)
+// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely
+// identify a user and supply the user with a consistent identity throughout
+// the lifetime of an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
+// in the AWS SDK for Android Developer Guide and Amazon Cognito Overview
+// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// in the AWS SDK for iOS Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
+// credentials. Therefore, you can distribute an application (for example, on
+// mobile devices) that requests temporary security credentials without including
+// long-term AWS credentials in the application, and without deploying server-based
+// proxy services that use long-term AWS credentials. Instead, the identity
+// of the caller is validated by using a token from the web identity provider.
+// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce
+// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to AWS service APIs.
+//
+// The credentials are valid for the duration that you specified when calling
+// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to
+// a maximum of 3600 seconds (1 hour). The default is 1 hour.
+//
+// The temporary security credentials created by AssumeRoleWithWebIdentity can
+// be used to make API calls to any AWS service with the following exception:
+// you cannot call the STS service's GetFederationToken or GetSessionToken APIs.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by both the access policy of the role that
+// is being assumed, and the policy that you pass. This gives you a way to further
+// restrict the permissions for the resulting temporary security credentials.
+// You cannot use the passed policy to grant permissions that are in excess
+// of those allowed by the access policy of the role that is being assumed.
+// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the IAM User Guide.
+//
+// Before your application can call AssumeRoleWithWebIdentity, you must have
+// an identity token from a supported identity provider and create a role that
+// the application can assume. The role that your application assumes must trust
+// the identity provider that is associated with the identity token. In other
+// words, the identity provider must be specified in the role's trust policy.
+//
+// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail
+// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims)
+// of the provided Web Identity Token. We recommend that you avoid using any
+// personally identifiable information (PII) in this field. For example, you
+// could instead use a GUID or a pairwise identifier, as suggested in the OIDC
+// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
+//
+// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
+// API, see the following resources:
+//
+// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
+// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
+// This interactive website lets you walk through the process of authenticating
+// via Login with Amazon, Facebook, or Google, getting temporary security
+// credentials, and then using those credentials to make a request to AWS.
+//
+// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android
+// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample
+// apps that show how to invoke the identity providers, and then how to use
+// the information from these providers to get and use temporary security
+// credentials.
+//
+// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313).
+// This article discusses web identity federation and shows an example of
+// how to use web identity federation to get access to content in Amazon
+// S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRoleWithWebIdentity for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
+// The identity provider (IdP) reported that authentication failed. This might
+// be because the claim is invalid.
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it
+// can also mean that the claim has expired or has been explicitly revoked.
+//
+// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError"
+// The request could not be fulfilled because the non-AWS identity provider
+// (IDP) that was asked to verify the incoming identity token could not be reached.
+// This is often a transient error caused by network conditions. Retry the request
+// a limited number of times so that you don't exceed the request rate. If the
+// error persists, the non-AWS identity provider might be down or not responding.
+//
+// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+// The web identity token that was passed could not be validated by AWS. Get
+// a new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeExpiredTokenException "ExpiredTokenException"
+// The web identity token that was passed is expired or is not valid. Get a
+// new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
+ req, out := c.AssumeRoleWithWebIdentityRequest(input)
+ return out, req.Send()
+}
+
+// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithWebIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) {
+ req, out := c.AssumeRoleWithWebIdentityRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
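+
+// Editor's note: an illustrative sketch, not generated documentation. svc is
+// assumed to be an *STS client and idToken an OpenID Connect ID token obtained
+// from the web identity provider; the role ARN is a placeholder.
+//
+// out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//     RoleArn:          aws.String("arn:aws:iam::123456789012:role/WebIdentityRole"),
+//     RoleSessionName:  aws.String("app-user-session"),
+//     WebIdentityToken: aws.String(idToken),
+// })
+// if err != nil {
+//     return err
+// }
+// fmt.Println(out.Credentials)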
+
+const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
+
+// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
+// client's request for the DecodeAuthorizationMessage operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DecodeAuthorizationMessage for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you only want
+// the service response, call the DecodeAuthorizationMessage method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DecodeAuthorizationMessageRequest method.
+// req, resp := client.DecodeAuthorizationMessageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
+ op := &request.Operation{
+ Name: opDecodeAuthorizationMessage,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DecodeAuthorizationMessageInput{}
+ }
+
+ output = &DecodeAuthorizationMessageOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DecodeAuthorizationMessage API operation for AWS Security Token Service.
+//
+// Decodes additional information about the authorization status of a request
+// from an encoded message returned in response to an AWS request.
+//
+// For example, if a user is not authorized to perform an action that he or
+// she has requested, the request returns a Client.UnauthorizedOperation response
+// (an HTTP 403 response). Some AWS actions additionally return an encoded message
+// that can provide details about this authorization failure.
+//
+// Only certain AWS actions return an encoded authorization message. The documentation
+// for an individual action indicates whether that action returns an encoded
+// message in addition to returning an HTTP code.
+//
+// The message is encoded because the details of the authorization status can
+// constitute privileged information that the user who requested the action
+// should not see. To decode an authorization status message, a user must be
+// granted permissions via an IAM policy to request the DecodeAuthorizationMessage
+// (sts:DecodeAuthorizationMessage) action.
+//
+// The decoded message includes the following type of information:
+//
+// * Whether the request was denied due to an explicit deny or due to the
+// absence of an explicit allow. For more information, see Determining Whether
+// a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+// in the IAM User Guide.
+//
+// * The principal who made the request.
+//
+// * The requested action.
+//
+// * The requested resource.
+//
+// * The values of condition keys in the context of the user's request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation DecodeAuthorizationMessage for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
+// The error returned if the message passed to DecodeAuthorizationMessage was
+// invalid. This can happen if the token contains invalid characters, such as
+// linebreaks.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
+ req, out := c.DecodeAuthorizationMessageRequest(input)
+ return out, req.Send()
+}
+
+// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DecodeAuthorizationMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
+ req, out := c.DecodeAuthorizationMessageRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
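+
+// Editor's note: an illustrative sketch, not generated documentation. svc is
+// assumed to be an *STS client and encodedMsg the encoded authorization
+// message taken from a Client.UnauthorizedOperation error response. The caller
+// must be allowed the sts:DecodeAuthorizationMessage action.
+//
+// out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
+//     EncodedMessage: aws.String(encodedMsg),
+// })
+// if err != nil {
+//     return err
+// }
+// fmt.Println(aws.StringValue(out.DecodedMessage)) // JSON describing the denied request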
+
+const opGetCallerIdentity = "GetCallerIdentity"
+
+// GetCallerIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the GetCallerIdentity operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetCallerIdentity for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you only want
+// the service response, call the GetCallerIdentity method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetCallerIdentityRequest method.
+// req, resp := client.GetCallerIdentityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
+ op := &request.Operation{
+ Name: opGetCallerIdentity,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetCallerIdentityInput{}
+ }
+
+ output = &GetCallerIdentityOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetCallerIdentity API operation for AWS Security Token Service.
+//
+// Returns details about the IAM identity whose credentials are used to call
+// the API.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetCallerIdentity for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
+ req, out := c.GetCallerIdentityRequest(input)
+ return out, req.Send()
+}
+
+// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCallerIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
+ req, out := c.GetCallerIdentityRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
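+
+// Editor's note: an illustrative sketch, not generated documentation, showing
+// the WithContext variant with a deadline. svc is assumed to be an *STS
+// client; context and time are the standard-library packages, and a
+// context.Context satisfies aws.Context.
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+// defer cancel()
+// out, err := svc.GetCallerIdentityWithContext(ctx, &sts.GetCallerIdentityInput{})
+// if err != nil {
+//     return err
+// }
+// fmt.Println(aws.StringValue(out.Account), aws.StringValue(out.Arn), aws.StringValue(out.UserId))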
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetFederationToken operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetFederationToken for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you only want
+// the service response, call the GetFederationToken method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetFederationTokenRequest method.
+// req, resp := client.GetFederationTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+ op := &request.Operation{
+ Name: opGetFederationToken,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetFederationTokenInput{}
+ }
+
+ output = &GetFederationTokenOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetFederationToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. Because
+// you must call the GetFederationToken action using the long-term security
+// credentials of an IAM user, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+// For a comparison of GetFederationToken with the other APIs that produce temporary
+// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// If you are creating a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider, we recommend that you
+// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// The GetFederationToken action must be called by using the long-term AWS security
+// credentials of an IAM user. You can also call GetFederationToken using the
+// security credentials of an AWS root account, but we do not recommend it.
+// Instead, we recommend that you create an IAM user for the purpose of the
+// proxy application and then attach a policy to the IAM user that limits federated
+// users to only the actions and resources that they need access to. For more
+// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials that are obtained by using the long-term
+// credentials of an IAM user are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default
+// is 43200 seconds (12 hours). Temporary credentials that are obtained by using
+// AWS root account credentials have a maximum duration of 3600 seconds (1 hour).
+//
+// The temporary security credentials created by GetFederationToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// * You cannot use these credentials to call any IAM APIs.
+//
+// * You cannot call any STS APIs except GetCallerIdentity.
+//
+// Permissions
+//
+// The permissions for the temporary security credentials returned by GetFederationToken
+// are determined by a combination of the following:
+//
+// * The policy or policies that are attached to the IAM user whose credentials
+// are used to call GetFederationToken.
+//
+// * The policy that is passed as a parameter in the call.
+//
+// The passed policy is attached to the temporary security credentials that
+// result from the GetFederationToken API call--that is, to the federated user.
+// When the federated user makes an AWS request, AWS evaluates the policy attached
+// to the federated user in combination with the policy or policies attached
+// to the IAM user whose credentials were used to call GetFederationToken. AWS
+// allows the federated user's request only when both the federated user and
+// the IAM user are explicitly allowed to perform the requested action. The
+// passed policy cannot grant more permissions than those that are defined in
+// the IAM user policy.
+//
+// A typical use case is that the permissions of the IAM user whose credentials
+// are used to call GetFederationToken are designed to allow access to all the
+// actions and resources that any federated user will need. Then, for individual
+// users, you pass a policy to the operation that scopes down the permissions
+// to a level that's appropriate to that individual user, using a policy that
+// allows only a subset of permissions that are granted to the IAM user.
+//
+// If you do not pass a policy, the resulting temporary security credentials
+// have no effective permissions. The only exception is when the temporary security
+// credentials are used to access a resource that has a resource-based policy
+// that specifically allows the federated user to access the resource.
+//
+// For more information about how permissions work, see Permissions for GetFederationToken
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+// For information about using GetFederationToken to create temporary security
+// credentials, see GetFederationToken—Federation Through a Custom Identity
+// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetFederationToken for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
+ req, out := c.GetFederationTokenRequest(input)
+ return out, req.Send()
+}
+
+// GetFederationTokenWithContext is the same as GetFederationToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetFederationToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
+ req, out := c.GetFederationTokenRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
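+
+// Editor's note: an illustrative sketch, not generated documentation. svc is
+// assumed to be an *STS client using an IAM user's long-term credentials, and
+// policyJSON a scope-down policy such as:
+//
+// {"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"arn:aws:s3:::example-bucket/*"}]}
+//
+// out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
+//     Name:            aws.String("federated-user"),
+//     Policy:          aws.String(policyJSON),
+//     DurationSeconds: aws.Int64(3600),
+// })
+// if err != nil {
+//     return err
+// }
+// fmt.Println(out.Credentials, aws.StringValue(out.FederatedUser.Arn))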
+
+const opGetSessionToken = "GetSessionToken"
+
+// GetSessionTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetSessionToken operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetSessionToken for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If you only want
+// the service response, call the GetSessionToken method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetSessionTokenRequest method.
+// req, resp := client.GetSessionTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
+ op := &request.Operation{
+ Name: opGetSessionToken,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetSessionTokenInput{}
+ }
+
+ output = &GetSessionTokenOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetSessionToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary credentials for an AWS account or IAM user. The
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use GetSessionToken if you want to use MFA to protect
+// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled
+// IAM users would need to call GetSessionToken and submit an MFA code that
+// is associated with their MFA device. Using the temporary security credentials
+// that are returned from the call, IAM users can then make programmatic calls
+// to APIs that require MFA authentication. If you do not supply a correct MFA
+// code, then the API returns an access denied error. For a comparison of GetSessionToken
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The GetSessionToken action must be called by using the long-term AWS security
+// credentials of the AWS account or an IAM user. Credentials that are created
+// by IAM users are valid for the duration that you specify, from 900 seconds
+// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default
+// of 43200 seconds (12 hours); credentials that are created by using account
+// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600
+// seconds (1 hour), with a default of 1 hour.
+//
+// The temporary security credentials created by GetSessionToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// * You cannot call any IAM APIs unless MFA authentication information is
+// included in the request.
+//
+// * You cannot call any STS API except AssumeRole or GetCallerIdentity.
+//
+// We recommend that you do not call GetSessionToken with root account credentials.
+// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions,
+// and using IAM users for everyday interaction with AWS.
+//
+// The permissions associated with the temporary security credentials returned
+// by GetSessionToken are based on the permissions associated with the account or
+// IAM user whose credentials are used to call the action. If GetSessionToken
+// is called using root account credentials, the temporary credentials have
+// root account permissions. Similarly, if GetSessionToken is called using the
+// credentials of an IAM user, the temporary credentials have the same permissions
+// as the IAM user.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetSessionToken for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
+ req, out := c.GetSessionTokenRequest(input)
+ return out, req.Send()
+}
+
+// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSessionToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
+ req, out := c.GetSessionTokenRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
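+
+// Editor's note: an illustrative sketch, not generated documentation. svc is
+// assumed to be an *STS client using an IAM user's long-term credentials; the
+// serial number and token code are placeholders for the user's MFA device and
+// its current one-time code.
+//
+// out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
+//     DurationSeconds: aws.Int64(43200),
+//     SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"),
+//     TokenCode:       aws.String("123456"),
+// })
+// if err != nil {
+//     return err
+// }
+// fmt.Println(out.Credentials)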
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest
+type AssumeRoleInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, of the role session. The value can range from 900
+ // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+ // to 3600 seconds.
+ //
+ // This is separate from the duration of a console session that you might request
+ // using the returned credentials. The request to the federation endpoint for
+ // a console sign-in token takes a SessionDuration parameter that specifies
+ // the maximum length of the console session, separately from the DurationSeconds
+ // parameter on this API. For more information, see Creating a URL that Enables
+ // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // A unique identifier that is used by third parties when assuming roles in
+ // their customers' accounts. For each role that the third party can assume,
+ // they should instruct their customers to ensure the role's trust policy checks
+ // for the external ID that the third party generated. Each time the third party
+ // assumes the role, they should pass the customer's external ID. The external
+// ID is useful to help third parties bind a role to the customer who
+ // created it. For more information about the external ID, see How to Use an
+ // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+ // in the IAM User Guide.
+ //
+// The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@:\/-
+ ExternalId *string `min:"2" type:"string"`
+
+ // An IAM policy in JSON format.
+ //
+ // This parameter is optional. If you pass a policy, the temporary security
+ // credentials that are returned by the operation have the permissions that
+ // are allowed by both (the intersection of) the access policy of the role that
+ // is being assumed, and the policy that you pass. This gives you a way to further
+ // restrict the permissions for the resulting temporary security credentials.
+ // You cannot use the passed policy to grant permissions that are in excess
+ // of those allowed by the access policy of the role that is being assumed.
+ // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+ // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+ // in the IAM User Guide.
+ //
+ // The format for this parameter, as described by its regex pattern, is a string
+ // of characters up to 2048 characters in length. The characters can be any
+ // ASCII character from the space character to the end of the valid character
+ // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string `min:"1" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the role to assume.
+ //
+ // RoleArn is a required field
+ RoleArn *string `min:"20" type:"string" required:"true"`
+
+ // An identifier for the assumed role session.
+ //
+ // Use the role session name to uniquely identify a session when the same role
+ // is assumed by different principals or for different reasons. In cross-account
+ // scenarios, the role session name is visible to, and can be logged by the
+ // account that owns the role. The role session name is also used in the ARN
+ // of the assumed role principal. This means that subsequent cross-account API
+ // requests using the temporary security credentials will expose the role session
+ // name to the external account in their CloudTrail logs.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ //
+ // RoleSessionName is a required field
+ RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+ // The identification number of the MFA device that is associated with the user
+ // who is making the AssumeRole call. Specify this value if the trust policy
+ // of the role being assumed includes a condition that requires MFA authentication.
+ // The value is either the serial number for a hardware device (such as GAHT12345678)
+ // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ SerialNumber *string `min:"9" type:"string"`
+
+ // The value provided by the MFA device, if the trust policy of the role being
+ // assumed requires MFA (that is, if the policy includes a condition that tests
+ // for MFA). If the role being assumed requires MFA and if the TokenCode value
+ // is missing or expired, the AssumeRole call returns an "access denied" error.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
+ TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.ExternalId != nil && len(*s.ExternalId) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+ }
+ if s.RoleSessionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
+ }
+ if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
+ }
+ if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+ invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+ }
+ if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+ invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetExternalId sets the ExternalId field's value.
+func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput {
+ s.ExternalId = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput {
+ s.Policy = &v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
+ s.RoleArn = &v
+ return s
+}
+
+// SetRoleSessionName sets the RoleSessionName field's value.
+func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput {
+ s.RoleSessionName = &v
+ return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput {
+ s.SerialNumber = &v
+ return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput {
+ s.TokenCode = &v
+ return s
+}
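+
+// Editor's note: an illustrative sketch, not generated documentation, showing
+// the generated fluent setters above together with Validate; the values are
+// placeholders.
+//
+// input := (&sts.AssumeRoleInput{}).
+//     SetRoleArn("arn:aws:iam::123456789012:role/Example").
+//     SetRoleSessionName("example-session").
+//     SetDurationSeconds(900)
+// if err := input.Validate(); err != nil {
+//     return err // aggregated failures, e.g. ErrParamRequired or ErrParamMinLen
+// }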
+
+// Contains the response to a successful AssumeRole request, including temporary
+// AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse
+type AssumeRoleOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+ // that you can use to refer to the resulting temporary security credentials.
+ // For example, you can reference these credentials as a principal in a resource-based
+ // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+ // that you specified when you called AssumeRole.
+ AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AssumeRoleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleOutput) GoString() string {
+ return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput {
+ s.AssumedRoleUser = v
+ return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest
+type AssumeRoleWithSAMLInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, of the role session. The value can range from 900
+ // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+ // to 3600 seconds. An expiration can also be specified in the SAML authentication
+ // response's SessionNotOnOrAfter value. The actual expiration time is whichever
+ // value is shorter.
+ //
+ // This is separate from the duration of a console session that you might request
+ // using the returned credentials. The request to the federation endpoint for
+ // a console sign-in token takes a SessionDuration parameter that specifies
+ // the maximum length of the console session, separately from the DurationSeconds
+ // parameter on this API. For more information, see Enabling SAML 2.0 Federated
+ // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html)
+ // in the IAM User Guide.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // An IAM policy in JSON format.
+ //
+ // The policy parameter is optional. If you pass a policy, the temporary security
+ // credentials that are returned by the operation have the permissions that
+ // are allowed by both the access policy of the role that is being assumed,
+ // and the policy that you pass. This gives you a way to further restrict the
+ // permissions for the resulting temporary security credentials. You cannot
+ // use the passed policy to grant permissions that are in excess of those allowed
+ // by the access policy of the role that is being assumed. For more information,
+ // Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
+ // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+ // in the IAM User Guide.
+ //
+ // The format for this parameter, as described by its regex pattern, is a string
+ // of characters up to 2048 characters in length. The characters can be any
+ // ASCII character from the space character to the end of the valid character
+ // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string `min:"1" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
+ // the IdP.
+ //
+ // PrincipalArn is a required field
+ PrincipalArn *string `min:"20" type:"string" required:"true"`
+
+ // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+ //
+ // RoleArn is a required field
+ RoleArn *string `min:"20" type:"string" required:"true"`
+
+ // The base-64 encoded SAML authentication response provided by the IdP.
+ //
+ // For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+ // in the Using IAM guide.
+ //
+ // SAMLAssertion is a required field
+ SAMLAssertion *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithSAMLInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+ if s.PrincipalArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("PrincipalArn"))
+ }
+ if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+ }
+ if s.SAMLAssertion == nil {
+ invalidParams.Add(request.NewErrParamRequired("SAMLAssertion"))
+ }
+ if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput {
+ s.Policy = &v
+ return s
+}
+
+// SetPrincipalArn sets the PrincipalArn field's value.
+func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput {
+ s.PrincipalArn = &v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput {
+ s.RoleArn = &v
+ return s
+}
+
+// SetSAMLAssertion sets the SAMLAssertion field's value.
+func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput {
+ s.SAMLAssertion = &v
+ return s
+}
+
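+// Example (illustrative sketch, not generated code): building a request value
+// with the fluent setters above. The ARNs are placeholder values, and
+// samlAssertion stands for a base64-encoded assertion obtained from the IdP.
+//
+//    input := (&AssumeRoleWithSAMLInput{}).
+//        SetRoleArn("arn:aws:iam::123456789012:role/SAMLRole").
+//        SetPrincipalArn("arn:aws:iam::123456789012:saml-provider/MyIdP").
+//        SetSAMLAssertion(samlAssertion)
+//    if err := input.Validate(); err != nil {
+//        // handle the invalid parameters before calling the service
+//    }
+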
+// Contains the response to a successful AssumeRoleWithSAML request, including
+// temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse
+type AssumeRoleWithSAMLOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifiers for the temporary security credentials that the operation
+ // returns.
+ AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+ // The value of the Recipient attribute of the SubjectConfirmationData element
+ // of the SAML assertion.
+ Audience *string `type:"string"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+
+ // The value of the Issuer element of the SAML assertion.
+ Issuer *string `type:"string"`
+
+ // A hash value based on the concatenation of the Issuer response value, the
+ // AWS account ID, and the friendly name (the last part of the ARN) of the SAML
+ // provider in IAM. The combination of NameQualifier and Subject can be used
+ // to uniquely identify a federated user.
+ //
+ // The following pseudocode shows how the hash value is calculated:
+ //
+ // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP"
+ // ) )
+ NameQualifier *string `type:"string"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize *int64 `type:"integer"`
+
+ // The value of the NameID element in the Subject element of the SAML assertion.
+ Subject *string `type:"string"`
+
+ // The format of the name ID, as defined by the Format attribute in the NameID
+ // element of the SAML assertion. Typical examples of the format are transient
+ // or persistent.
+ //
+ // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format,
+ // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient
+ // is returned as transient. If the format includes any other prefix, the format
+ // is returned with no modifications.
+ SubjectType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLOutput) GoString() string {
+ return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput {
+ s.AssumedRoleUser = v
+ return s
+}
+
+// SetAudience sets the Audience field's value.
+func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput {
+ s.Audience = &v
+ return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetIssuer sets the Issuer field's value.
+func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput {
+ s.Issuer = &v
+ return s
+}
+
+// SetNameQualifier sets the NameQualifier field's value.
+func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput {
+ s.NameQualifier = &v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+// SetSubject sets the Subject field's value.
+func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput {
+ s.Subject = &v
+ return s
+}
+
+// SetSubjectType sets the SubjectType field's value.
+func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput {
+ s.SubjectType = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest
+type AssumeRoleWithWebIdentityInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, of the role session. The value can range from 900
+ // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+ // to 3600 seconds.
+ //
+ // This is separate from the duration of a console session that you might request
+ // using the returned credentials. The request to the federation endpoint for
+ // a console sign-in token takes a SessionDuration parameter that specifies
+ // the maximum length of the console session, separately from the DurationSeconds
+ // parameter on this API. For more information, see Creating a URL that Enables
+ // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // An IAM policy in JSON format.
+ //
+ // The policy parameter is optional. If you pass a policy, the temporary security
+ // credentials that are returned by the operation have the permissions that
+ // are allowed by both the access policy of the role that is being assumed,
+ // and the policy that you pass. This gives you a way to further restrict the
+ // permissions for the resulting temporary security credentials. You cannot
+ // use the passed policy to grant permissions that are in excess of those allowed
+ // by the access policy of the role that is being assumed. For more information,
+ // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+ // in the IAM User Guide.
+ //
+ // The format for this parameter, as described by its regex pattern, is a string
+ // of characters up to 2048 characters in length. The characters can be any
+ // ASCII character from the space character to the end of the valid character
+ // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string `min:"1" type:"string"`
+
+ // The fully qualified host component of the domain name of the identity provider.
+ //
+ // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com
+ // and graph.facebook.com are the only supported identity providers for OAuth
+ // 2.0 access tokens. Do not include URL schemes and port numbers.
+ //
+ // Do not specify this value for OpenID Connect ID tokens.
+ ProviderId *string `min:"4" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+ //
+ // RoleArn is a required field
+ RoleArn *string `min:"20" type:"string" required:"true"`
+
+ // An identifier for the assumed role session. Typically, you pass the name
+ // or identifier that is associated with the user who is using your application.
+ // That way, the temporary security credentials that your application will use
+ // are associated with that user. This session name is included as part of the
+ // ARN and assumed role ID in the AssumedRoleUser response element.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ //
+ // RoleSessionName is a required field
+ RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+ // The OAuth 2.0 access token or OpenID Connect ID token that is provided by
+ // the identity provider. Your application must get this token by authenticating
+ // the user who is using your application with a web identity provider before
+ // the application makes an AssumeRoleWithWebIdentity call.
+ //
+ // WebIdentityToken is a required field
+ WebIdentityToken *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithWebIdentityInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithWebIdentityInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithWebIdentityInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+ if s.ProviderId != nil && len(*s.ProviderId) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+ }
+ if s.RoleSessionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
+ }
+ if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
+ }
+ if s.WebIdentityToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("WebIdentityToken"))
+ }
+ if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput {
+ s.Policy = &v
+ return s
+}
+
+// SetProviderId sets the ProviderId field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput {
+ s.ProviderId = &v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput {
+ s.RoleArn = &v
+ return s
+}
+
+// SetRoleSessionName sets the RoleSessionName field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput {
+ s.RoleSessionName = &v
+ return s
+}
+
+// SetWebIdentityToken sets the WebIdentityToken field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput {
+ s.WebIdentityToken = &v
+ return s
+}
+
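+// Example (illustrative sketch, not generated code): an OpenID Connect request.
+// ProviderId is deliberately omitted, as the field documentation above requires
+// for OIDC ID tokens; the ARN, session name, and oidcIDToken are placeholders.
+//
+//    input := (&AssumeRoleWithWebIdentityInput{}).
+//        SetRoleArn("arn:aws:iam::123456789012:role/WebIdentityRole").
+//        SetRoleSessionName("app-user-1234").
+//        SetWebIdentityToken(oidcIDToken)
+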
+// Contains the response to a successful AssumeRoleWithWebIdentity request,
+// including temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse
+type AssumeRoleWithWebIdentityOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+ // that you can use to refer to the resulting temporary security credentials.
+ // For example, you can reference these credentials as a principal in a resource-based
+ // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+ // that you specified when you called AssumeRole.
+ AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+ // The intended audience (also known as client ID) of the web identity token.
+ // This is traditionally the client identifier issued to the application that
+ // requested the web identity token.
+ Audience *string `type:"string"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize *int64 `type:"integer"`
+
+ // The issuing authority of the web identity token presented. For OpenID Connect
+ // ID Tokens this contains the value of the iss field. For OAuth 2.0 access
+ // tokens, this contains the value of the ProviderId parameter that was passed
+ // in the AssumeRoleWithWebIdentity request.
+ Provider *string `type:"string"`
+
+ // The unique user identifier that is returned by the identity provider. This
+ // identifier is associated with the WebIdentityToken that was submitted with
+ // the AssumeRoleWithWebIdentity call. The identifier is typically unique to
+ // the user and the application that acquired the WebIdentityToken (pairwise
+ // identifier). For OpenID Connect ID tokens, this field contains the value
+ // returned by the identity provider as the token's sub (Subject) claim.
+ SubjectFromWebIdentityToken *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithWebIdentityOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithWebIdentityOutput) GoString() string {
+ return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput {
+ s.AssumedRoleUser = v
+ return s
+}
+
+// SetAudience sets the Audience field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput {
+ s.Audience = &v
+ return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+// SetProvider sets the Provider field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput {
+ s.Provider = &v
+ return s
+}
+
+// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput {
+ s.SubjectFromWebIdentityToken = &v
+ return s
+}
+
+// The identifiers for the temporary security credentials that the operation
+// returns.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
+type AssumedRoleUser struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the temporary security credentials that are returned from the
+ // AssumeRole action. For more information about ARNs and how to use them in
+ // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+ // in Using IAM.
+ //
+ // Arn is a required field
+ Arn *string `min:"20" type:"string" required:"true"`
+
+ // A unique identifier that contains the role ID and the role session name of
+ // the role that is being assumed. The role ID is generated by AWS when the
+ // role is created.
+ //
+ // AssumedRoleId is a required field
+ AssumedRoleId *string `min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumedRoleUser) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumedRoleUser) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser {
+ s.Arn = &v
+ return s
+}
+
+// SetAssumedRoleId sets the AssumedRoleId field's value.
+func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser {
+ s.AssumedRoleId = &v
+ return s
+}
+
+// AWS credentials for API authentication.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials
+type Credentials struct {
+ _ struct{} `type:"structure"`
+
+ // The access key ID that identifies the temporary security credentials.
+ //
+ // AccessKeyId is a required field
+ AccessKeyId *string `min:"16" type:"string" required:"true"`
+
+ // The date on which the current credentials expire.
+ //
+ // Expiration is a required field
+ Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+ // The secret access key that can be used to sign requests.
+ //
+ // SecretAccessKey is a required field
+ SecretAccessKey *string `type:"string" required:"true"`
+
+ // The token that users must pass to the service API to use the temporary credentials.
+ //
+ // SessionToken is a required field
+ SessionToken *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Credentials) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Credentials) GoString() string {
+ return s.String()
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *Credentials) SetAccessKeyId(v string) *Credentials {
+ s.AccessKeyId = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *Credentials) SetExpiration(v time.Time) *Credentials {
+ s.Expiration = &v
+ return s
+}
+
+// SetSecretAccessKey sets the SecretAccessKey field's value.
+func (s *Credentials) SetSecretAccessKey(v string) *Credentials {
+ s.SecretAccessKey = &v
+ return s
+}
+
+// SetSessionToken sets the SessionToken field's value.
+func (s *Credentials) SetSessionToken(v string) *Credentials {
+ s.SessionToken = &v
+ return s
+}
+
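+// Example (illustrative sketch, not generated code): reading a Credentials
+// value returned by an STS call. "out" stands for any of the *Output types
+// above; the nil checks reflect that every field is a pointer.
+//
+//    if creds := out.Credentials; creds != nil && creds.Expiration != nil {
+//        fmt.Printf("key %s expires at %s\n", *creds.AccessKeyId, *creds.Expiration)
+//    }
+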
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest
+type DecodeAuthorizationMessageInput struct {
+ _ struct{} `type:"structure"`
+
+ // The encoded message that was returned with the response.
+ //
+ // EncodedMessage is a required field
+ EncodedMessage *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DecodeAuthorizationMessageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DecodeAuthorizationMessageInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DecodeAuthorizationMessageInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"}
+ if s.EncodedMessage == nil {
+ invalidParams.Add(request.NewErrParamRequired("EncodedMessage"))
+ }
+ if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEncodedMessage sets the EncodedMessage field's value.
+func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput {
+ s.EncodedMessage = &v
+ return s
+}
+
+// A document that contains additional information about the authorization status
+// of a request from an encoded message that is returned in response to an AWS
+// request.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse
+type DecodeAuthorizationMessageOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An XML document that contains the decoded message.
+ DecodedMessage *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DecodeAuthorizationMessageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DecodeAuthorizationMessageOutput) GoString() string {
+ return s.String()
+}
+
+// SetDecodedMessage sets the DecodedMessage field's value.
+func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput {
+ s.DecodedMessage = &v
+ return s
+}
+
+// Identifiers for the federated user that is associated with the credentials.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser
+type FederatedUser struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN that specifies the federated user that is associated with the credentials.
+ // For more information about ARNs and how to use them in policies, see IAM
+ // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+ // in Using IAM.
+ //
+ // Arn is a required field
+ Arn *string `min:"20" type:"string" required:"true"`
+
+ // The string that identifies the federated user associated with the credentials,
+ // similar to the unique ID of an IAM user.
+ //
+ // FederatedUserId is a required field
+ FederatedUserId *string `min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s FederatedUser) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FederatedUser) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *FederatedUser) SetArn(v string) *FederatedUser {
+ s.Arn = &v
+ return s
+}
+
+// SetFederatedUserId sets the FederatedUserId field's value.
+func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser {
+ s.FederatedUserId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest
+type GetCallerIdentityInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetCallerIdentityInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCallerIdentityInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful GetCallerIdentity request, including
+// information about the entity making the request.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse
+type GetCallerIdentityOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The AWS account ID number of the account that owns or contains the calling
+ // entity.
+ Account *string `type:"string"`
+
+ // The AWS ARN associated with the calling entity.
+ Arn *string `min:"20" type:"string"`
+
+ // The unique identifier of the calling entity. The exact value depends on the
+ // type of entity making the call. The values returned are those listed in the
+ // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
+ // found on the Policy Variables reference page in the IAM User Guide.
+ UserId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetCallerIdentityOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCallerIdentityOutput) GoString() string {
+ return s.String()
+}
+
+// SetAccount sets the Account field's value.
+func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput {
+ s.Account = &v
+ return s
+}
+
+// SetArn sets the Arn field's value.
+func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput {
+ s.Arn = &v
+ return s
+}
+
+// SetUserId sets the UserId field's value.
+func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput {
+ s.UserId = &v
+ return s
+}
+
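+// Example (illustrative sketch, not generated code): GetCallerIdentity takes
+// no parameters, which makes it a convenient way to confirm which credentials
+// a client is using. "svc" is assumed to be an *STS client created elsewhere.
+//
+//    out, err := svc.GetCallerIdentity(&GetCallerIdentityInput{})
+//    if err == nil && out.Account != nil && out.Arn != nil {
+//        fmt.Println("caller:", *out.Arn, "account:", *out.Account)
+//    }
+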
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest
+type GetFederationTokenInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, that the session should last. Acceptable durations
+ // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds
+ // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained
+ // using AWS account (root) credentials are restricted to a maximum of 3600
+ // seconds (one hour). If the specified duration is longer than one hour, the
+ // session obtained by using AWS account (root) credentials defaults to one
+ // hour.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // The name of the federated user. The name is used as an identifier for the
+ // temporary security credentials (such as Bob). For example, you can reference
+ // the federated user name in a resource-based policy, such as in an Amazon
+ // S3 bucket policy.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ //
+ // Name is a required field
+ Name *string `min:"2" type:"string" required:"true"`
+
+ // An IAM policy in JSON format that is passed with the GetFederationToken call
+ // and evaluated along with the policy or policies that are attached to the
+ // IAM user whose credentials are used to call GetFederationToken. The passed
+ // policy is used to scope down the permissions that are available to the IAM
+ // user, by allowing only a subset of the permissions that are granted to the
+ // IAM user. The passed policy cannot grant more permissions than those granted
+ // to the IAM user. The final permissions for the federated user are the most
+ // restrictive set based on the intersection of the passed policy and the IAM
+ // user policy.
+ //
+ // If you do not pass a policy, the resulting temporary security credentials
+ // have no effective permissions. The only exception is when the temporary security
+ // credentials are used to access a resource that has a resource-based policy
+ // that specifically allows the federated user to access the resource.
+ //
+ // The format for this parameter, as described by its regex pattern, is a string
+ // of characters up to 2048 characters in length. The characters can be any
+ // ASCII character from the space character to the end of the valid character
+ // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ //
+ // For more information about how permissions work, see Permissions for GetFederationToken
+ // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+ Policy *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetFederationTokenInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFederationTokenInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetFederationTokenInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 2))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput {
+ s.Name = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput {
+ s.Policy = &v
+ return s
+}
+
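+// Example (illustrative sketch, not generated code): federating a user with a
+// scoped-down policy. The name and policy JSON are placeholder values; per the
+// field documentation above, the effective permissions are the intersection of
+// this policy and the calling IAM user's policies.
+//
+//    input := (&GetFederationTokenInput{}).
+//        SetName("Bob").
+//        SetPolicy(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`)
+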
+// Contains the response to a successful GetFederationToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse
+type GetFederationTokenOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+
+ // Identifiers for the federated user associated with the credentials (such
+ // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You
+ // can use the federated user's ARN in your resource-based policies, such as
+ // an Amazon S3 bucket policy.
+ FederatedUser *FederatedUser `type:"structure"`
+
+ // A percentage value indicating the size of the policy in packed form. The
+ // service rejects policies for which the packed size is greater than 100 percent
+ // of the allowed value.
+ PackedPolicySize *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s GetFederationTokenOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFederationTokenOutput) GoString() string {
+ return s.String()
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetFederatedUser sets the FederatedUser field's value.
+func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput {
+ s.FederatedUser = v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest
+type GetSessionTokenInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, that the credentials should remain valid. Acceptable
+ // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600
+ // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions
+ // for AWS account owners are restricted to a maximum of 3600 seconds (one hour).
+ // If the duration is longer than one hour, the session for AWS account owners
+ // defaults to one hour.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // The identification number of the MFA device that is associated with the IAM
+ // user who is making the GetSessionToken call. Specify this value if the IAM
+ // user has a policy that requires MFA authentication. The value is either the
+ // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource
+ // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ // You can find the device for an IAM user by going to the AWS Management Console
+ // and viewing the user's security credentials.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ SerialNumber *string `min:"9" type:"string"`
+
+ // The value provided by the MFA device, if MFA is required. If any policy requires
+ // the IAM user to submit an MFA code, specify this value. If MFA authentication
+ // is required, and the user does not provide a code when requesting a set of
+ // temporary security credentials, the user will receive an "access denied"
+ // response when requesting resources that require MFA authentication.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
+ TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetSessionTokenInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+ invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+ }
+ if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+ invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput {
+ s.SerialNumber = &v
+ return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput {
+ s.TokenCode = &v
+ return s
+}
+
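+// Example (illustrative sketch, not generated code): requesting a session token
+// with MFA. The serial number and token code are placeholder values, in the
+// formats described by the field documentation above.
+//
+//    input := (&GetSessionTokenInput{}).
+//        SetSerialNumber("arn:aws:iam::123456789012:mfa/user").
+//        SetTokenCode("123456").
+//        SetDurationSeconds(3600)
+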
+// Contains the response to a successful GetSessionToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse
+type GetSessionTokenOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenOutput) GoString() string {
+ return s.String()
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput {
+ s.Credentials = v
+ return s
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
new file mode 100644
index 00000000..4010cc7f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
@@ -0,0 +1,15 @@
+package sts
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
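+// AssumeRoleWithSAML and AssumeRoleWithWebIdentity are called by principals
+// that do not yet hold AWS credentials, so their requests must not be signed;
+// the request hook below removes the SigV4 signing handler for them.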
+func init() {
+ initRequest = func(r *request.Request) {
+ switch r.Operation.Name {
+ case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity:
+ r.Handlers.Sign.Clear() // these operations are unsigned
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
new file mode 100644
index 00000000..d2af518c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
@@ -0,0 +1,124 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package sts provides the client and types for making API
+// requests to AWS Security Token Service.
+//
+// The AWS Security Token Service (STS) is a web service that enables you to
+// request temporary, limited-privilege credentials for AWS Identity and Access
+// Management (IAM) users or for users that you authenticate (federated users).
+// This guide provides descriptions of the STS API. For more detailed information
+// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
+//
+// As an alternative to using the API, you can use one of the AWS SDKs, which
+// consist of libraries and sample code for various programming languages and
+// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
+// way to create programmatic access to STS. For example, the SDKs take care
+// of cryptographically signing requests, managing errors, and retrying requests
+// automatically. For information about the AWS SDKs, including how to download
+// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
+//
+// For information about setting up signatures and authorization through the
+// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
+// in the AWS General Reference. For general information about the Query API,
+// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in Using IAM. For information about using security tokens with other AWS
+// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
+// in the IAM User Guide.
+//
+// If you're new to AWS and need additional technical information about a specific
+// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
+// (http://aws.amazon.com/documentation/).
+//
+// Endpoints
+//
+// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
+// that maps to the US East (N. Virginia) region. Additional regions are available
+// and are activated by default. For more information, see Activating and Deactivating
+// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
+// in the AWS General Reference.
+//
+// Recording API requests
+//
+// STS supports AWS CloudTrail, which is a service that records AWS calls for
+// your AWS account and delivers log files to an Amazon S3 bucket. By using
+// information collected by CloudTrail, you can determine what requests were
+// successfully made to STS, who made the request, when it was made, and so
+// on. To learn more about CloudTrail, including how to turn it on and find
+// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
+//
+// See sts package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/
+//
+// Using the Client
+//
+// To use the client for AWS Security Token Service, you will first need
+// to create a new instance of it.
+//
+// When creating a client for an AWS service, you'll first need to have a Session
+// already created. The Session provides configuration that can be shared
+// between multiple service clients. Additional configuration can be applied to
+// the Session and the service's client when they are constructed. The aws package's
+// Config type contains several fields, such as Region for the AWS Region the
+// client should make API requests to. The optional Config value can be provided
+// as a variadic argument for Session and client creation.
+//
+// Once the service's client is created, you can use it to make API requests to
+// the AWS service. These clients are safe to use concurrently.
+//
+// // Create a session to share configuration, and load external configuration.
+// sess := session.Must(session.NewSession())
+//
+// // Create the service's client with the session.
+// svc := sts.New(sess)
+//
+// See the SDK's documentation for more information on how to use service clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws package's Config type for more information on configuration options.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Security Token Service client STS for more
+// information on creating the service's client.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
+//
+// Once the client is created you can make an API request to the service.
+// Each API method takes an input parameter, and returns the service response
+// and an error.
+//
+// The API method's documentation will list the error codes the service can
+// return for the operation, if the service models the API operation's errors.
+// These errors will also be available as const strings prefixed with "ErrCode".
+//
+// result, err := svc.AssumeRole(params)
+// if err != nil {
+// // Cast err to awserr.Error to handle specific error codes.
+// aerr, ok := err.(awserr.Error)
+// if ok && aerr.Code() == <error code to check for> {
+// // Specific error code handling
+// }
+// return err
+// }
+//
+// fmt.Println("AssumeRole result:")
+// fmt.Println(result)
+//
+// Using the Client with Context
+//
+// The service's client also provides methods to make API requests with a Context
+// value. This allows you to control the timeout, and cancellation of pending
+// requests. These methods also take request Option as variadic parameter to apply
+// additional configuration to the API request.
+//
+// ctx := context.Background()
+//
+// result, err := svc.AssumeRoleWithContext(ctx, params)
+//
+// See the request package documentation for more information on using Context pattern
+// with the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
+package sts
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
new file mode 100644
index 00000000..e24884ef
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
@@ -0,0 +1,73 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+const (
+
+ // ErrCodeExpiredTokenException for service response error code
+ // "ExpiredTokenException".
+ //
+ // The web identity token that was passed is expired or is not valid. Get a
+ // new identity token from the identity provider and then retry the request.
+ ErrCodeExpiredTokenException = "ExpiredTokenException"
+
+ // ErrCodeIDPCommunicationErrorException for service response error code
+ // "IDPCommunicationError".
+ //
+ // The request could not be fulfilled because the non-AWS identity provider
+ // (IDP) that was asked to verify the incoming identity token could not be reached.
+ // This is often a transient error caused by network conditions. Retry the request
+ // a limited number of times so that you don't exceed the request rate. If the
+ // error persists, the non-AWS identity provider might be down or not responding.
+ ErrCodeIDPCommunicationErrorException = "IDPCommunicationError"
+
+ // ErrCodeIDPRejectedClaimException for service response error code
+ // "IDPRejectedClaim".
+ //
+ // The identity provider (IdP) reported that authentication failed. This might
+ // be because the claim is invalid.
+ //
+ // If this error is returned for the AssumeRoleWithWebIdentity operation, it
+ // can also mean that the claim has expired or has been explicitly revoked.
+ ErrCodeIDPRejectedClaimException = "IDPRejectedClaim"
+
+ // ErrCodeInvalidAuthorizationMessageException for service response error code
+ // "InvalidAuthorizationMessageException".
+ //
+ // The error returned if the message passed to DecodeAuthorizationMessage was
+ // invalid. This can happen if the token contains invalid characters, such as
+ // linebreaks.
+ ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException"
+
+ // ErrCodeInvalidIdentityTokenException for service response error code
+ // "InvalidIdentityToken".
+ //
+ // The web identity token that was passed could not be validated by AWS. Get
+ // a new identity token from the identity provider and then retry the request.
+ ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken"
+
+ // ErrCodeMalformedPolicyDocumentException for service response error code
+ // "MalformedPolicyDocument".
+ //
+ // The request was rejected because the policy document was malformed. The error
+ // message describes the specific error.
+ ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument"
+
+ // ErrCodePackedPolicyTooLargeException for service response error code
+ // "PackedPolicyTooLarge".
+ //
+ // The request was rejected because the policy document was too large. The error
+ // message describes how big the policy document is, in packed form, as a percentage
+ // of what the API allows.
+ ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"
+
+ // ErrCodeRegionDisabledException for service response error code
+ // "RegionDisabledException".
+ //
+ // STS is not activated in the requested region for the account that is being
+ // asked to generate credentials. The account administrator must use the IAM
+ // console to activate STS in that region. For more information, see Activating
+ // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+ // in the IAM User Guide.
+ ErrCodeRegionDisabledException = "RegionDisabledException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 00000000..1ee5839e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,93 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type STS struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "sts" // Service endpoint prefix API calls made to.
+ EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a STS client from just a session.
+// svc := sts.New(mySession)
+//
+// // Create a STS client with additional configuration
+// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS {
+ svc := &STS{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2011-06-15",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/bgentry/go-netrc/LICENSE b/vendor/github.com/bgentry/go-netrc/LICENSE
new file mode 100644
index 00000000..aade9a58
--- /dev/null
+++ b/vendor/github.com/bgentry/go-netrc/LICENSE
@@ -0,0 +1,20 @@
+Original version Copyright © 2010 Fazlul Shahriar <fshahriar@gmail.com>. Newer
+portions Copyright © 2014 Blake Gentry <blakesgentry@gmail.com>.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/bgentry/go-netrc/README.md b/vendor/github.com/bgentry/go-netrc/README.md
new file mode 100644
index 00000000..6759f7ad
--- /dev/null
+++ b/vendor/github.com/bgentry/go-netrc/README.md
@@ -0,0 +1,9 @@
+# go-netrc
+
+A Golang package for reading and writing netrc files. This package can parse netrc
+files, make changes to them, and then serialize them back to netrc format, while
+preserving any whitespace that was present in the source file.
+
+[![GoDoc](https://godoc.org/github.com/bgentry/go-netrc?status.png)][godoc]
+
+[godoc]: https://godoc.org/github.com/bgentry/go-netrc "go-netrc on Godoc.org"
diff --git a/vendor/github.com/bgentry/go-netrc/netrc/netrc.go b/vendor/github.com/bgentry/go-netrc/netrc/netrc.go
new file mode 100644
index 00000000..ea49987c
--- /dev/null
+++ b/vendor/github.com/bgentry/go-netrc/netrc/netrc.go
@@ -0,0 +1,518 @@
+package netrc
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+type tkType int
+
+const (
+ tkMachine tkType = iota
+ tkDefault
+ tkLogin
+ tkPassword
+ tkAccount
+ tkMacdef
+ tkComment
+ tkWhitespace
+)
+
+var keywords = map[string]tkType{
+ "machine": tkMachine,
+ "default": tkDefault,
+ "login": tkLogin,
+ "password": tkPassword,
+ "account": tkAccount,
+ "macdef": tkMacdef,
+ "#": tkComment,
+}
+
+type Netrc struct {
+ tokens []*token
+ machines []*Machine
+ macros Macros
+ updateLock sync.Mutex
+}
+
+// FindMachine returns the Machine in n named by name. If a machine with that
+// name exists, it is returned. If no such Machine is found and there is a
+// ``default'' machine, the ``default'' machine is returned. Otherwise, nil
+// is returned.
+func (n *Netrc) FindMachine(name string) (m *Machine) {
+ // TODO(bgentry): not safe for concurrency
+ var def *Machine
+ for _, m = range n.machines {
+ if m.Name == name {
+ return m
+ }
+ if m.IsDefault() {
+ def = m
+ }
+ }
+ if def == nil {
+ return nil
+ }
+ return def
+}
+
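+// Example (illustrative sketch, not part of the original library): looking up
+// credentials for a host; FindMachine falls back to the ``default'' entry
+// when no machine-specific entry exists.
+//
+//    if m := n.FindMachine("api.example.com"); m != nil {
+//        fmt.Println(m.Login, m.Password)
+//    }
+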
+// MarshalText implements the encoding.TextMarshaler interface to encode a
+// Netrc into text format.
+func (n *Netrc) MarshalText() (text []byte, err error) {
+ // TODO(bgentry): not safe for concurrency
+ for i := range n.tokens {
+ switch n.tokens[i].kind {
+ case tkComment, tkDefault, tkWhitespace: // always append these types
+ text = append(text, n.tokens[i].rawkind...)
+ default:
+ if n.tokens[i].value != "" { // skip empty-value tokens
+ text = append(text, n.tokens[i].rawkind...)
+ }
+ }
+ if n.tokens[i].kind == tkMacdef {
+ text = append(text, ' ')
+ text = append(text, n.tokens[i].macroName...)
+ }
+ text = append(text, n.tokens[i].rawvalue...)
+ }
+ return
+}
+
+func (n *Netrc) NewMachine(name, login, password, account string) *Machine {
+ n.updateLock.Lock()
+ defer n.updateLock.Unlock()
+
+ prefix := "\n"
+ if len(n.tokens) == 0 {
+ prefix = ""
+ }
+ m := &Machine{
+ Name: name,
+ Login: login,
+ Password: password,
+ Account: account,
+
+ nametoken: &token{
+ kind: tkMachine,
+ rawkind: []byte(prefix + "machine"),
+ value: name,
+ rawvalue: []byte(" " + name),
+ },
+ logintoken: &token{
+ kind: tkLogin,
+ rawkind: []byte("\n\tlogin"),
+ value: login,
+ rawvalue: []byte(" " + login),
+ },
+ passtoken: &token{
+ kind: tkPassword,
+ rawkind: []byte("\n\tpassword"),
+ value: password,
+ rawvalue: []byte(" " + password),
+ },
+ accounttoken: &token{
+ kind: tkAccount,
+ rawkind: []byte("\n\taccount"),
+ value: account,
+ rawvalue: []byte(" " + account),
+ },
+ }
+ n.insertMachineTokensBeforeDefault(m)
+ for i := range n.machines {
+ if n.machines[i].IsDefault() {
+ n.machines = append(append(n.machines[:i], m), n.machines[i:]...)
+ return m
+ }
+ }
+ n.machines = append(n.machines, m)
+ return m
+}
+
+func (n *Netrc) insertMachineTokensBeforeDefault(m *Machine) {
+ newtokens := []*token{m.nametoken}
+ if m.logintoken.value != "" {
+ newtokens = append(newtokens, m.logintoken)
+ }
+ if m.passtoken.value != "" {
+ newtokens = append(newtokens, m.passtoken)
+ }
+ if m.accounttoken.value != "" {
+ newtokens = append(newtokens, m.accounttoken)
+ }
+ for i := range n.tokens {
+ if n.tokens[i].kind == tkDefault {
+ // found the default, now insert tokens before it
+ n.tokens = append(n.tokens[:i], append(newtokens, n.tokens[i:]...)...)
+ return
+ }
+ }
+	// didn't find a default, just add the newtokens to the end
+	n.tokens = append(n.tokens, newtokens...)
+}
+
+func (n *Netrc) RemoveMachine(name string) {
+ n.updateLock.Lock()
+ defer n.updateLock.Unlock()
+
+ for i := range n.machines {
+ if n.machines[i] != nil && n.machines[i].Name == name {
+ m := n.machines[i]
+ for _, t := range []*token{
+ m.nametoken, m.logintoken, m.passtoken, m.accounttoken,
+ } {
+ n.removeToken(t)
+ }
+ n.machines = append(n.machines[:i], n.machines[i+1:]...)
+ return
+ }
+ }
+}
+
+func (n *Netrc) removeToken(t *token) {
+ if t != nil {
+ for i := range n.tokens {
+ if n.tokens[i] == t {
+ n.tokens = append(n.tokens[:i], n.tokens[i+1:]...)
+ return
+ }
+ }
+ }
+}
+
+// Machine contains information about a remote machine.
+type Machine struct {
+ Name string
+ Login string
+ Password string
+ Account string
+
+ nametoken *token
+ logintoken *token
+ passtoken *token
+ accounttoken *token
+}
+
+// IsDefault returns true if the machine is a "default" token, denoted by an
+// empty name.
+func (m *Machine) IsDefault() bool {
+ return m.Name == ""
+}
+
+// UpdatePassword sets the password for the Machine m.
+func (m *Machine) UpdatePassword(newpass string) {
+ m.Password = newpass
+ updateTokenValue(m.passtoken, newpass)
+}
+
+// UpdateLogin sets the login for the Machine m.
+func (m *Machine) UpdateLogin(newlogin string) {
+ m.Login = newlogin
+ updateTokenValue(m.logintoken, newlogin)
+}
+
+// UpdateAccount sets the login for the Machine m.
+func (m *Machine) UpdateAccount(newaccount string) {
+ m.Account = newaccount
+ updateTokenValue(m.accounttoken, newaccount)
+}
+
+func updateTokenValue(t *token, value string) {
+ oldvalue := t.value
+ t.value = value
+ newraw := make([]byte, len(t.rawvalue))
+ copy(newraw, t.rawvalue)
+ t.rawvalue = append(
+ bytes.TrimSuffix(newraw, []byte(oldvalue)),
+ []byte(value)...,
+ )
+}
+
+// Macros contains all the macro definitions in a netrc file.
+type Macros map[string]string
+
+type token struct {
+ kind tkType
+ macroName string
+ value string
+ rawkind []byte
+ rawvalue []byte
+}
+
+// Error represents a netrc file parse error.
+type Error struct {
+ LineNum int // Line number
+ Msg string // Error message
+}
+
+// Error returns a string representation of error e.
+func (e *Error) Error() string {
+ return fmt.Sprintf("line %d: %s", e.LineNum, e.Msg)
+}
+
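+// BadDefaultOrder reports whether the error was caused by a ``default''
+// token appearing before all of the machine tokens.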
+func (e *Error) BadDefaultOrder() bool {
+ return e.Msg == errBadDefaultOrder
+}
+
+const errBadDefaultOrder = "default token must appear after all machine tokens"
+
+// scanLinesKeepPrefix is a split function for a Scanner that returns each line
+// of text. The returned token may include newlines if they are before the
+// first non-space character. The returned line may be empty. The end-of-line
+// marker is one optional carriage return followed by one mandatory newline. In
+// regular expression notation, it is `\r?\n`. The last non-empty line of
+// input will be returned even if it has no newline.
+func scanLinesKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ if atEOF && len(data) == 0 {
+ return 0, nil, nil
+ }
+ // Skip leading spaces.
+ start := 0
+ for width := 0; start < len(data); start += width {
+ var r rune
+ r, width = utf8.DecodeRune(data[start:])
+ if !unicode.IsSpace(r) {
+ break
+ }
+ }
+ if i := bytes.IndexByte(data[start:], '\n'); i >= 0 {
+ // We have a full newline-terminated line.
+ return start + i, data[0 : start+i], nil
+ }
+ // If we're at EOF, we have a final, non-terminated line. Return it.
+ if atEOF {
+ return len(data), data, nil
+ }
+ // Request more data.
+ return 0, nil, nil
+}
+
+// scanTokensKeepPrefix is a split function for a Scanner that returns each
+// space-separated word of text, with any leading spaces included. It will never
+// return an empty string. The definition of space is set by unicode.IsSpace.
+//
+// Adapted from bufio.ScanWords().
+func scanTokensKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ // Skip leading spaces.
+ start := 0
+ for width := 0; start < len(data); start += width {
+ var r rune
+ r, width = utf8.DecodeRune(data[start:])
+ if !unicode.IsSpace(r) {
+ break
+ }
+ }
+ if atEOF && len(data) == 0 || start == len(data) {
+ return len(data), data, nil
+ }
+ if len(data) > start && data[start] == '#' {
+ return scanLinesKeepPrefix(data, atEOF)
+ }
+ // Scan until space, marking end of word.
+ for width, i := 0, start; i < len(data); i += width {
+ var r rune
+ r, width = utf8.DecodeRune(data[i:])
+ if unicode.IsSpace(r) {
+ return i, data[:i], nil
+ }
+ }
+ // If we're at EOF, we have a final, non-empty, non-terminated word. Return it.
+ if atEOF && len(data) > start {
+ return len(data), data, nil
+ }
+ // Request more data.
+ return 0, nil, nil
+}
+
+func newToken(rawb []byte) (*token, error) {
+ _, tkind, err := bufio.ScanWords(rawb, true)
+ if err != nil {
+ return nil, err
+ }
+ var ok bool
+ t := token{rawkind: rawb}
+ t.kind, ok = keywords[string(tkind)]
+ if !ok {
+ trimmed := strings.TrimSpace(string(tkind))
+ if trimmed == "" {
+ t.kind = tkWhitespace // whitespace-only, should happen only at EOF
+ return &t, nil
+ }
+ if strings.HasPrefix(trimmed, "#") {
+ t.kind = tkComment // this is a comment
+ return &t, nil
+ }
+		return &t, fmt.Errorf("keyword expected; got %s", tkind)
+ }
+ return &t, nil
+}
+
+func scanValue(scanner *bufio.Scanner, pos int) ([]byte, string, int, error) {
+ if scanner.Scan() {
+ raw := scanner.Bytes()
+ pos += bytes.Count(raw, []byte{'\n'})
+ return raw, strings.TrimSpace(string(raw)), pos, nil
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, "", pos, &Error{pos, err.Error()}
+ }
+ return nil, "", pos, nil
+}
+
+func parse(r io.Reader, pos int) (*Netrc, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ nrc := Netrc{machines: make([]*Machine, 0, 20), macros: make(Macros, 10)}
+
+ defaultSeen := false
+ var currentMacro *token
+ var m *Machine
+ var t *token
+ scanner := bufio.NewScanner(bytes.NewReader(b))
+ scanner.Split(scanTokensKeepPrefix)
+
+ for scanner.Scan() {
+ rawb := scanner.Bytes()
+ if len(rawb) == 0 {
+ break
+ }
+ pos += bytes.Count(rawb, []byte{'\n'})
+ t, err = newToken(rawb)
+ if err != nil {
+ if currentMacro == nil {
+ return nil, &Error{pos, err.Error()}
+ }
+ currentMacro.rawvalue = append(currentMacro.rawvalue, rawb...)
+ continue
+ }
+
+ if currentMacro != nil && bytes.Contains(rawb, []byte{'\n', '\n'}) {
+			// a blank line (\n\n) in the scanned token ends the macro definition
+ currentMacro.value = strings.TrimLeft(string(currentMacro.rawvalue), "\r\n")
+ nrc.macros[currentMacro.macroName] = currentMacro.value
+ currentMacro = nil
+ }
+
+ switch t.kind {
+ case tkMacdef:
+ if _, t.macroName, pos, err = scanValue(scanner, pos); err != nil {
+ return nil, &Error{pos, err.Error()}
+ }
+ currentMacro = t
+ case tkDefault:
+ if defaultSeen {
+ return nil, &Error{pos, "multiple default token"}
+ }
+ if m != nil {
+ nrc.machines, m = append(nrc.machines, m), nil
+ }
+ m = new(Machine)
+ m.Name = ""
+ defaultSeen = true
+ case tkMachine:
+ if defaultSeen {
+ return nil, &Error{pos, errBadDefaultOrder}
+ }
+ if m != nil {
+ nrc.machines, m = append(nrc.machines, m), nil
+ }
+ m = new(Machine)
+ if t.rawvalue, m.Name, pos, err = scanValue(scanner, pos); err != nil {
+ return nil, &Error{pos, err.Error()}
+ }
+ t.value = m.Name
+ m.nametoken = t
+ case tkLogin:
+ if m == nil || m.Login != "" {
+ return nil, &Error{pos, "unexpected token login "}
+ }
+ if t.rawvalue, m.Login, pos, err = scanValue(scanner, pos); err != nil {
+ return nil, &Error{pos, err.Error()}
+ }
+ t.value = m.Login
+ m.logintoken = t
+ case tkPassword:
+ if m == nil || m.Password != "" {
+ return nil, &Error{pos, "unexpected token password"}
+ }
+ if t.rawvalue, m.Password, pos, err = scanValue(scanner, pos); err != nil {
+ return nil, &Error{pos, err.Error()}
+ }
+ t.value = m.Password
+ m.passtoken = t
+ case tkAccount:
+ if m == nil || m.Account != "" {
+ return nil, &Error{pos, "unexpected token account"}
+ }
+ if t.rawvalue, m.Account, pos, err = scanValue(scanner, pos); err != nil {
+ return nil, &Error{pos, err.Error()}
+ }
+ t.value = m.Account
+ m.accounttoken = t
+ }
+
+ nrc.tokens = append(nrc.tokens, t)
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ if m != nil {
+ nrc.machines, m = append(nrc.machines, m), nil
+ }
+ return &nrc, nil
+}
+
+// ParseFile opens the file at filename and then passes its io.Reader to
+// Parse().
+func ParseFile(filename string) (*Netrc, error) {
+ fd, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer fd.Close()
+ return Parse(fd)
+}
+
+// Parse parses the Reader r as a netrc file and returns the set of
+// machine information and macros defined in it. The ``default'' machine,
+// which is intended to be used when no machine name matches, is identified
+// by an empty machine name. There can be only one ``default'' machine.
+//
+// If there is a parsing error, an Error is returned.
+func Parse(r io.Reader) (*Netrc, error) {
+ return parse(r, 1)
+}
+
+// FindMachine parses the netrc file identified by filename and returns the
+// Machine named by name. If a problem occurs parsing the file at filename, an
+// error is returned. If a machine named by name exists, it is returned. If no
+// Machine with name name is found and there is a ``default'' machine, the
+// ``default'' machine is returned. Otherwise, nil is returned.
+func FindMachine(filename, name string) (m *Machine, err error) {
+ n, err := ParseFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ return n.FindMachine(name), nil
+}
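A short sketch of how Parse and FindMachine compose, including the ``default'' fallback described above (the hosts and credentials are invented for illustration):

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/bgentry/go-netrc/netrc"
)

func main() {
	src := "machine example.com\n\tlogin alice\n\tpassword one\n" +
		"default\n\tlogin anonymous\n\tpassword two\n"

	n, err := netrc.Parse(strings.NewReader(src))
	if err != nil {
		log.Fatal(err)
	}

	// A matching machine is returned directly...
	fmt.Println(n.FindMachine("example.com").Login) // alice

	// ...and an unknown name falls back to the default entry.
	fmt.Println(n.FindMachine("unknown.host").Login) // anonymous
}
```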
diff --git a/vendor/github.com/c4milo/gotoolkit/LICENSE b/vendor/github.com/c4milo/gotoolkit/LICENSE
new file mode 100644
index 00000000..52d13511
--- /dev/null
+++ b/vendor/github.com/c4milo/gotoolkit/LICENSE
@@ -0,0 +1,374 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/c4milo/gotoolkit/README.md b/vendor/github.com/c4milo/gotoolkit/README.md
new file mode 100644
index 00000000..3aaa5536
--- /dev/null
+++ b/vendor/github.com/c4milo/gotoolkit/README.md
@@ -0,0 +1,12 @@
+# Go Toolkit
+[![GoDoc](https://godoc.org/github.com/c4milo/gotoolkit?status.svg)](https://godoc.org/github.com/c4milo/gotoolkit)
+[![Build Status](https://travis-ci.org/c4milo/gotoolkit.svg?branch=master)](https://travis-ci.org/c4milo/gotoolkit)
+
+A set of algorithms and data structures that are not part of the standard Go packages and that we need
+on a daily basis.
+
+## Algorithms
+* **SliceStack**
+* **ListStack**
+* **ListQueue**
+* **SliceQueue**
diff --git a/vendor/github.com/c4milo/gotoolkit/queue.go b/vendor/github.com/c4milo/gotoolkit/queue.go
new file mode 100644
index 00000000..6caf1258
--- /dev/null
+++ b/vendor/github.com/c4milo/gotoolkit/queue.go
@@ -0,0 +1,114 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package gotoolkit
+
+import "errors"
+
+// Queue defines an interface for implementing Queue data structures.
+type Queue interface {
+ Enqueue(item interface{})
+ Dequeue() (interface{}, error)
+ Peek() (interface{}, error)
+ IsEmpty() bool
+ Size() uint64
+}
+
+// ListQueue implements a queue backed by a linked list. It may be faster than
+// SliceQueue, but it consumes more memory and has worse cache locality.
+type ListQueue struct {
+ first *node
+ last *node
+ size uint64
+}
+
+// Enqueue adds an element to the end of the queue.
+func (q *ListQueue) Enqueue(item interface{}) {
+ last := new(node)
+ last.item = item
+
+ if q.IsEmpty() {
+ q.first = last
+ } else {
+ oldLast := q.last
+ oldLast.next = last
+ }
+ q.last = last
+ q.size++
+}
+
+// Dequeue removes and returns the first element from the queue.
+func (q *ListQueue) Dequeue() (interface{}, error) {
+	if q.IsEmpty() {
+		q.last = nil
+		return nil, errors.New("unable to dequeue element, queue is empty")
+	}
+
+	item := q.first.item
+	q.first = q.first.next
+	if q.first == nil {
+		// The queue is now empty; drop the stale tail reference so the
+		// dequeued node can be garbage collected.
+		q.last = nil
+	}
+	q.size--
+	return item, nil
+}
+
+// Peek returns the first element in the queue without removing it.
+func (q *ListQueue) Peek() (interface{}, error) {
+ if q.IsEmpty() {
+ return nil, errors.New("unable to peek element, queue is empty")
+ }
+ return q.first.item, nil
+}
+
+// IsEmpty returns whether the queue is empty or not.
+func (q *ListQueue) IsEmpty() bool {
+ return q.first == nil
+}
+
+// Size returns the number of elements in the queue.
+func (q *ListQueue) Size() uint64 {
+ return q.size
+}
+
+// SliceQueue implements a queue backed by a growing slice. Useful for
+// memory-constrained environments, it also has better cache locality than
+// ListQueue.
+type SliceQueue struct {
+ size uint64
+ backing []interface{}
+}
+
+// Enqueue adds an element to the end of the queue.
+func (s *SliceQueue) Enqueue(item interface{}) {
+ s.size++
+ s.backing = append(s.backing, item)
+}
+
+// Peek returns the first element of the queue without removing it from the queue.
+func (s *SliceQueue) Peek() (interface{}, error) {
+ if s.IsEmpty() {
+ return nil, errors.New("unable to peek element, queue is empty")
+ }
+ return s.backing[0], nil
+}
+
+// Dequeue removes and returns the first element from the queue.
+func (s *SliceQueue) Dequeue() (interface{}, error) {
+ if s.IsEmpty() {
+ return nil, errors.New("unable to dequeue element, queue is empty")
+ }
+
+ item := s.backing[0]
+ // https://github.com/golang/go/wiki/SliceTricks
+ s.backing = append(s.backing[:0], s.backing[1:]...)
+ s.size--
+ return item, nil
+}
+
+// IsEmpty returns whether or not the queue is empty.
+func (s *SliceQueue) IsEmpty() bool {
+ return len(s.backing) == 0
+}
+
+// Size returns the number of elements in the queue.
+func (s *SliceQueue) Size() uint64 {
+ return s.size
+}
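A small usage sketch of the Queue interface defined above; either implementation can back it, and the values enqueued here are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/c4milo/gotoolkit"
)

func main() {
	// Both *ListQueue and *SliceQueue satisfy the Queue interface.
	var q gotoolkit.Queue = &gotoolkit.SliceQueue{}

	q.Enqueue("first")
	q.Enqueue("second")

	// FIFO order: "first" comes out before "second".
	for !q.IsEmpty() {
		item, err := q.Dequeue()
		if err != nil {
			break
		}
		fmt.Println(item)
	}
}
```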
diff --git a/vendor/github.com/c4milo/gotoolkit/stack.go b/vendor/github.com/c4milo/gotoolkit/stack.go
new file mode 100644
index 00000000..8314cb37
--- /dev/null
+++ b/vendor/github.com/c4milo/gotoolkit/stack.go
@@ -0,0 +1,110 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package gotoolkit
+
+import "errors"
+
+// Stack defines an interface for implementing Stack data structures.
+type Stack interface {
+ Push(item interface{})
+ Pop() (interface{}, error)
+ Peek() (interface{}, error)
+ IsEmpty() bool
+ Size() uint64
+}
+
+type node struct {
+ item interface{}
+ next *node
+}
+
+// ListStack is a stack backed by a linked list. It may be faster than
+// SliceStack, but it consumes more memory and has worse cache locality.
+type ListStack struct {
+ first *node
+ size uint64
+}
+
+// Push adds an element to the top of the stack.
+func (s *ListStack) Push(item interface{}) {
+ oldFirst := s.first
+ s.first = new(node)
+ s.first.item = item
+ s.first.next = oldFirst
+ s.size++
+}
+
+// Peek returns the most recently added element without removing it from the stack.
+func (s *ListStack) Peek() (interface{}, error) {
+ if s.IsEmpty() {
+ return nil, errors.New("unable to peek element, stack is empty")
+ }
+
+ return s.first.item, nil
+}
+
+// Pop removes and returns the most recently added element from the stack.
+func (s *ListStack) Pop() (interface{}, error) {
+ if s.IsEmpty() {
+ return nil, errors.New("unable to pop element, stack is empty")
+ }
+ item := s.first.item
+ s.first = s.first.next
+ s.size--
+ return item, nil
+}
+
+// IsEmpty returns whether or not the stack is empty.
+func (s *ListStack) IsEmpty() bool {
+ return s.first == nil
+}
+
+// Size returns the number of elements in the stack.
+func (s *ListStack) Size() uint64 {
+ return s.size
+}
+
+// SliceStack implements a stack backed by a growing slice. Useful for
+// memory-constrained environments, it also has better cache locality than
+// ListStack.
+type SliceStack struct {
+ size uint64
+ backing []interface{}
+}
+
+// Push adds an element to the top of the stack.
+func (s *SliceStack) Push(item interface{}) {
+ s.size++
+ s.backing = append(s.backing, item)
+}
+
+// Peek returns the most recently added element without removing it from the stack.
+func (s *SliceStack) Peek() (interface{}, error) {
+ if s.IsEmpty() {
+ return nil, errors.New("unable to peek element, stack is empty")
+ }
+ return s.backing[s.size-1], nil
+}
+
+// Pop removes and returns the most recently added element from the stack.
+func (s *SliceStack) Pop() (interface{}, error) {
+ if s.IsEmpty() {
+ return nil, errors.New("unable to pop element, stack is empty")
+ }
+
+ s.size--
+ item := s.backing[s.size]
+ s.backing = s.backing[0:s.size]
+ return item, nil
+}
+
+// IsEmpty returns whether or not the stack is empty.
+func (s *SliceStack) IsEmpty() bool {
+ return len(s.backing) == 0
+}
+
+// Size returns the number of elements in the stack.
+func (s *SliceStack) Size() uint64 {
+ return s.size
+}
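And the mirror-image sketch for the Stack interface; Push/Pop are LIFO, so elements come back in reverse insertion order:

```go
package main

import (
	"fmt"

	"github.com/c4milo/gotoolkit"
)

func main() {
	var s gotoolkit.Stack = &gotoolkit.ListStack{}

	s.Push(1)
	s.Push(2)
	s.Push(3)

	// LIFO order: prints 3, 2, 1.
	for !s.IsEmpty() {
		item, err := s.Pop()
		if err != nil {
			break
		}
		fmt.Println(item)
	}
}
```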
diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/coreos/go-semver/README.md b/vendor/github.com/coreos/go-semver/README.md
new file mode 100644
index 00000000..5bc9263c
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/README.md
@@ -0,0 +1,28 @@
+# go-semver - Semantic Versioning Library
+
+[![Build Status](https://travis-ci.org/coreos/go-semver.svg?branch=master)](https://travis-ci.org/coreos/go-semver)
+[![GoDoc](https://godoc.org/github.com/coreos/go-semver/semver?status.svg)](https://godoc.org/github.com/coreos/go-semver/semver)
+
+go-semver is a [semantic versioning][semver] library for Go. It lets you parse
+and compare two semantic version strings.
+
+[semver]: http://semver.org/
+
+## Usage
+
+```go
+vA := semver.New("1.2.3")
+vB := semver.New("3.2.1")
+
+fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB))
+```
+
+## Example Application
+
+```
+$ go run example.go 1.2.3 3.2.1
+1.2.3 < 3.2.1 == true
+
+$ go run example.go 5.2.3 3.2.1
+5.2.3 < 3.2.1 == false
+```
diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go
new file mode 100644
index 00000000..9c8072d1
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/semver/semver.go
@@ -0,0 +1,275 @@
+// Copyright 2013-2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semver implements Semantic Versions (http://semver.org).
+package semver
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type Version struct {
+ Major int64
+ Minor int64
+ Patch int64
+ PreRelease PreRelease
+ Metadata string
+}
+
+type PreRelease string
+
+func splitOff(input *string, delim string) (val string) {
+ parts := strings.SplitN(*input, delim, 2)
+
+ if len(parts) == 2 {
+ *input = parts[0]
+ val = parts[1]
+ }
+
+ return val
+}
+
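+// New is a convenience wrapper around NewVersion; it panics if the given
+// string is not a valid semantic version.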
+func New(version string) *Version {
+ return Must(NewVersion(version))
+}
+
+func NewVersion(version string) (*Version, error) {
+ v := Version{}
+
+ if err := v.Set(version); err != nil {
+ return nil, err
+ }
+
+ return &v, nil
+}
+
+// Must is a helper for wrapping NewVersion and will panic if err is not nil.
+func Must(v *Version, err error) *Version {
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Set parses and updates v from the given version string. Implements flag.Value
+func (v *Version) Set(version string) error {
+ metadata := splitOff(&version, "+")
+ preRelease := PreRelease(splitOff(&version, "-"))
+ dotParts := strings.SplitN(version, ".", 3)
+
+ if len(dotParts) != 3 {
+ return fmt.Errorf("%s is not in dotted-tri format", version)
+ }
+
+	parsed := make([]int64, 3)
+
+	for i, v := range dotParts[:3] {
+		val, err := strconv.ParseInt(v, 10, 64)
+		if err != nil {
+			return err
+		}
+		parsed[i] = val
+	}
+
+ v.Metadata = metadata
+ v.PreRelease = preRelease
+ v.Major = parsed[0]
+ v.Minor = parsed[1]
+ v.Patch = parsed[2]
+ return nil
+}
+
+func (v Version) String() string {
+ var buffer bytes.Buffer
+
+ fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
+
+ if v.PreRelease != "" {
+ fmt.Fprintf(&buffer, "-%s", v.PreRelease)
+ }
+
+ if v.Metadata != "" {
+ fmt.Fprintf(&buffer, "+%s", v.Metadata)
+ }
+
+ return buffer.String()
+}
+
+func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var data string
+ if err := unmarshal(&data); err != nil {
+ return err
+ }
+ return v.Set(data)
+}
+
+func (v Version) MarshalJSON() ([]byte, error) {
+ return []byte(`"` + v.String() + `"`), nil
+}
+
+func (v *Version) UnmarshalJSON(data []byte) error {
+ l := len(data)
+ if l == 0 || string(data) == `""` {
+ return nil
+ }
+ if l < 2 || data[0] != '"' || data[l-1] != '"' {
+ return errors.New("invalid semver string")
+ }
+ return v.Set(string(data[1 : l-1]))
+}
+
+// Compare tests if v is less than, equal to, or greater than versionB,
+// returning -1, 0, or +1 respectively.
+func (v Version) Compare(versionB Version) int {
+ if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
+ return cmp
+ }
+ return preReleaseCompare(v, versionB)
+}
+
+// Equal tests if v is equal to versionB.
+func (v Version) Equal(versionB Version) bool {
+ return v.Compare(versionB) == 0
+}
+
+// LessThan tests if v is less than versionB.
+func (v Version) LessThan(versionB Version) bool {
+ return v.Compare(versionB) < 0
+}
+
+// Slice converts the comparable parts of the semver into a slice of integers.
+func (v Version) Slice() []int64 {
+ return []int64{v.Major, v.Minor, v.Patch}
+}
+
+func (p PreRelease) Slice() []string {
+ preRelease := string(p)
+ return strings.Split(preRelease, ".")
+}
+
+func preReleaseCompare(versionA Version, versionB Version) int {
+ a := versionA.PreRelease
+ b := versionB.PreRelease
+
+	// If two versions are otherwise equal, the one without a pre-release
+	// is the greater one.
+ if len(a) == 0 && (len(b) > 0) {
+ return 1
+ } else if len(b) == 0 && (len(a) > 0) {
+ return -1
+ }
+
+ // If there is a prerelease, check and compare each part.
+ return recursivePreReleaseCompare(a.Slice(), b.Slice())
+}
+
+func recursiveCompare(versionA []int64, versionB []int64) int {
+ if len(versionA) == 0 {
+ return 0
+ }
+
+ a := versionA[0]
+ b := versionB[0]
+
+ if a > b {
+ return 1
+ } else if a < b {
+ return -1
+ }
+
+ return recursiveCompare(versionA[1:], versionB[1:])
+}
+
+func recursivePreReleaseCompare(versionA []string, versionB []string) int {
+ // A larger set of pre-release fields has a higher precedence than a smaller set,
+ // if all of the preceding identifiers are equal.
+ if len(versionA) == 0 {
+ if len(versionB) > 0 {
+ return -1
+ }
+ return 0
+ } else if len(versionB) == 0 {
+ // We're longer than versionB so return 1.
+ return 1
+ }
+
+ a := versionA[0]
+ b := versionB[0]
+
+ aInt := false
+ bInt := false
+
+ aI, err := strconv.Atoi(versionA[0])
+ if err == nil {
+ aInt = true
+ }
+
+ bI, err := strconv.Atoi(versionB[0])
+ if err == nil {
+ bInt = true
+ }
+
+ // Numeric identifiers always have lower precedence than non-numeric identifiers.
+ if aInt && !bInt {
+ return -1
+ } else if !aInt && bInt {
+ return 1
+ }
+
+ // Handle Integer Comparison
+ if aInt && bInt {
+ if aI > bI {
+ return 1
+ } else if aI < bI {
+ return -1
+ }
+ }
+
+ // Handle String Comparison
+ if a > b {
+ return 1
+ } else if a < b {
+ return -1
+ }
+
+ return recursivePreReleaseCompare(versionA[1:], versionB[1:])
+}
+
+// BumpMajor increments the Major field by 1 and resets all other fields to
+// their default values.
+func (v *Version) BumpMajor() {
+	v.Major++
+	v.Minor = 0
+	v.Patch = 0
+	v.PreRelease = PreRelease("")
+	v.Metadata = ""
+}
+
+// BumpMinor increments the Minor field by 1 and resets the Patch, PreRelease,
+// and Metadata fields to their default values.
+func (v *Version) BumpMinor() {
+	v.Minor++
+	v.Patch = 0
+	v.PreRelease = PreRelease("")
+	v.Metadata = ""
+}
+
+// BumpPatch increments the Patch field by 1 and resets the PreRelease and
+// Metadata fields to their default values.
+func (v *Version) BumpPatch() {
+	v.Patch++
+	v.PreRelease = PreRelease("")
+	v.Metadata = ""
+}
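A brief sketch of the precedence rules implemented above: a pre-release sorts below its corresponding release, and numeric pre-release identifiers rank below alphanumeric ones (the version strings are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	a := semver.New("1.0.0-alpha.1")
	b := semver.New("1.0.0")

	// A pre-release version sorts below the corresponding release.
	fmt.Println(a.LessThan(*b)) // true

	// Numeric identifiers ("1") rank below alphanumeric ones ("beta").
	c := semver.New("1.0.0-alpha.beta")
	fmt.Println(a.LessThan(*c)) // true
}
```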
diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go
new file mode 100644
index 00000000..e256b41a
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/semver/sort.go
@@ -0,0 +1,38 @@
+// Copyright 2013-2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semver
+
+import (
+ "sort"
+)
+
+type Versions []*Version
+
+func (s Versions) Len() int {
+ return len(s)
+}
+
+func (s Versions) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s Versions) Less(i, j int) bool {
+ return s[i].LessThan(*s[j])
+}
+
+// Sort sorts the given slice of *Version in ascending order.
+func Sort(versions []*Version) {
+ sort.Sort(Versions(versions))
+}
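A minimal sketch of Sort in use (the inputs are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	versions := []*semver.Version{
		semver.New("1.0.0"),
		semver.New("0.9.8"),
		semver.New("1.0.0-rc.1"),
	}

	semver.Sort(versions)

	for _, v := range versions {
		fmt.Println(v) // 0.9.8, 1.0.0-rc.1, 1.0.0
	}
}
```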
diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE
new file mode 100644
index 00000000..37ec93a1
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/coreos/go-systemd/README.md b/vendor/github.com/coreos/go-systemd/README.md
new file mode 100644
index 00000000..cb87a112
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/README.md
@@ -0,0 +1,54 @@
+# go-systemd
+
+[![Build Status](https://travis-ci.org/coreos/go-systemd.png?branch=master)](https://travis-ci.org/coreos/go-systemd)
+[![godoc](https://godoc.org/github.com/coreos/go-systemd?status.svg)](http://godoc.org/github.com/coreos/go-systemd)
+
+Go bindings to systemd. The project has several packages:
+
+- `activation` - for writing and using socket activation from Go
+- `dbus` - for starting/stopping/inspecting running services and units
+- `journal` - for writing to systemd's logging service, journald
+- `sdjournal` - for reading from journald by wrapping its C API
+- `machine1` - for registering machines/containers with systemd
+- `unit` - for (de)serialization and comparison of unit files
+
+## Socket Activation
+
+An example HTTP server using socket activation can be quickly set up by following this README on a Linux machine running systemd:
+
+https://github.com/coreos/go-systemd/tree/master/examples/activation/httpserver
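+
+As a minimal sketch (the socket unit setup is not shown and the handler is illustrative), a socket-activated server retrieves the listeners systemd passed in and serves on the first one:
+
+```go
+package main
+
+import (
+	"log"
+	"net/http"
+
+	"github.com/coreos/go-systemd/activation"
+)
+
+func main() {
+	// Listeners returns one net.Listener per file descriptor handed
+	// over by systemd; passing true also unsets the LISTEN_* env vars.
+	listeners, err := activation.Listeners(true)
+	if err != nil || len(listeners) == 0 {
+		log.Fatalf("no activated sockets: %v", err)
+	}
+	log.Fatal(http.Serve(listeners[0], nil))
+}
+```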
+
+## Journal
+
+Using the pure-Go `journal` package you can submit journal entries directly to systemd's journal, taking advantage of features like indexed key/value pairs for each log entry.
+The `sdjournal` package provides read access to the journal by wrapping around journald's native C API; consequently it requires cgo and the journal headers to be available.
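+
+For example, a minimal sketch of submitting an entry with a custom indexed field (the message and field name are illustrative):
+
+```go
+package main
+
+import "github.com/coreos/go-systemd/journal"
+
+func main() {
+	if journal.Enabled() {
+		// Send writes the message at the given priority and attaches
+		// the map entries as indexed journal fields.
+		journal.Send("unit deployed", journal.PriInfo, map[string]string{
+			"UNIT_NAME": "example.service",
+		})
+	}
+}
+```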
+
+## D-Bus
+
+The `dbus` package connects to the [systemd D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/dbus/) and lets you start, stop and introspect systemd units. The API docs are here:
+
+http://godoc.org/github.com/coreos/go-systemd/dbus
+
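+A short sketch of starting a unit (the unit name is illustrative and error handling is kept minimal):
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/coreos/go-systemd/dbus"
+)
+
+func main() {
+	conn, err := dbus.New()
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer conn.Close()
+
+	// StartUnit queues a start job; the job result ("done", "failed",
+	// etc.) is delivered on the supplied channel.
+	ch := make(chan string)
+	if _, err := conn.StartUnit("example.service", "replace", ch); err != nil {
+		log.Fatal(err)
+	}
+	log.Println("start job:", <-ch)
+}
+```
+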
+### Debugging
+
+Create `/etc/dbus-1/system-local.conf` that looks like this:
+
+```
+<!DOCTYPE busconfig PUBLIC
+"-//freedesktop//DTD D-Bus Bus Configuration 1.0//EN"
+"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
+<busconfig>
+ <policy user="root">
+ <allow eavesdrop="true"/>
+ <allow eavesdrop="true" send_destination="*"/>
+ </policy>
+</busconfig>
+```
+
+## machined
+
+The `machine1` package allows interaction with the [systemd machined D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/machined/).
+
+## Units
+
+The `unit` package provides various functions for working with [systemd unit files](http://www.freedesktop.org/software/systemd/man/systemd.unit.html).
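+
+A small sketch of round-tripping a unit file with this package (the unit text is illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strings"
+
+	"github.com/coreos/go-systemd/unit"
+)
+
+func main() {
+	in := "[Service]\nExecStart=/usr/bin/sleep infinity\n"
+	opts, err := unit.Deserialize(strings.NewReader(in))
+	if err != nil {
+		panic(err)
+	}
+	opts = append(opts, unit.NewUnitOption("Unit", "Description", "example"))
+	out, _ := ioutil.ReadAll(unit.Serialize(opts))
+	fmt.Print(string(out))
+}
+```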
diff --git a/vendor/github.com/coreos/go-systemd/unit/deserialize.go b/vendor/github.com/coreos/go-systemd/unit/deserialize.go
new file mode 100644
index 00000000..8a88162f
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/unit/deserialize.go
@@ -0,0 +1,276 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package unit
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "unicode"
+)
+
+const (
+ // SYSTEMD_LINE_MAX mimics the maximum line length that systemd can use.
+ // On typical systemd platforms (i.e. modern Linux), this will most
+ // commonly be 2048, so let's use that as a sanity check.
+ // Technically, we should probably pull this at runtime:
+// SYSTEMD_LINE_MAX = int(C.sysconf(C._SC_LINE_MAX))
+ // but this would introduce an (unfortunate) dependency on cgo
+ SYSTEMD_LINE_MAX = 2048
+
+ // characters that systemd considers indicate a newline
+ SYSTEMD_NEWLINE = "\r\n"
+)
+
+var (
+ ErrLineTooLong = fmt.Errorf("line too long (max %d bytes)", SYSTEMD_LINE_MAX)
+)
+
+// Deserialize parses a systemd unit file into a list of UnitOption objects.
+func Deserialize(f io.Reader) (opts []*UnitOption, err error) {
+ lexer, optchan, errchan := newLexer(f)
+ go lexer.lex()
+
+ for opt := range optchan {
+ opts = append(opts, opt)
+ }
+
+ err = <-errchan
+ return opts, err
+}
+
+func newLexer(f io.Reader) (*lexer, <-chan *UnitOption, <-chan error) {
+ optchan := make(chan *UnitOption)
+ errchan := make(chan error, 1)
+ buf := bufio.NewReader(f)
+
+ return &lexer{buf, optchan, errchan, ""}, optchan, errchan
+}
+
+type lexer struct {
+ buf *bufio.Reader
+ optchan chan *UnitOption
+ errchan chan error
+ section string
+}
+
+func (l *lexer) lex() {
+ var err error
+ defer func() {
+ close(l.optchan)
+ close(l.errchan)
+ }()
+ next := l.lexNextSection
+ for next != nil {
+ if l.buf.Buffered() >= SYSTEMD_LINE_MAX {
+ // systemd truncates lines longer than LINE_MAX
+ // https://bugs.freedesktop.org/show_bug.cgi?id=85308
+ // Rather than allowing this to pass silently, let's
+ // explicitly gate people from encountering this
+ line, err := l.buf.Peek(SYSTEMD_LINE_MAX)
+ if err != nil {
+ l.errchan <- err
+ return
+ }
+ if bytes.IndexAny(line, SYSTEMD_NEWLINE) == -1 {
+ l.errchan <- ErrLineTooLong
+ return
+ }
+ }
+
+ next, err = next()
+ if err != nil {
+ l.errchan <- err
+ return
+ }
+ }
+}
+
+type lexStep func() (lexStep, error)
+
+func (l *lexer) lexSectionName() (lexStep, error) {
+ sec, err := l.buf.ReadBytes(']')
+ if err != nil {
+ return nil, errors.New("unable to find end of section")
+ }
+
+ return l.lexSectionSuffixFunc(string(sec[:len(sec)-1])), nil
+}
+
+func (l *lexer) lexSectionSuffixFunc(section string) lexStep {
+ return func() (lexStep, error) {
+ garbage, _, err := l.toEOL()
+ if err != nil {
+ return nil, err
+ }
+
+ garbage = bytes.TrimSpace(garbage)
+ if len(garbage) > 0 {
+ return nil, fmt.Errorf("found garbage after section name %s: %v", l.section, garbage)
+ }
+
+ return l.lexNextSectionOrOptionFunc(section), nil
+ }
+}
+
+func (l *lexer) ignoreLineFunc(next lexStep) lexStep {
+ return func() (lexStep, error) {
+ for {
+ line, _, err := l.toEOL()
+ if err != nil {
+ return nil, err
+ }
+
+ line = bytes.TrimSuffix(line, []byte{' '})
+
+ // lack of continuation means this line has been exhausted
+ if !bytes.HasSuffix(line, []byte{'\\'}) {
+ break
+ }
+ }
+
+ // the line (including any continuations) has been consumed; move on
+ return next, nil
+ }
+}
+
+func (l *lexer) lexNextSection() (lexStep, error) {
+ r, _, err := l.buf.ReadRune()
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ }
+ return nil, err
+ }
+
+ if r == '[' {
+ return l.lexSectionName, nil
+ } else if isComment(r) {
+ return l.ignoreLineFunc(l.lexNextSection), nil
+ }
+
+ return l.lexNextSection, nil
+}
+
+func (l *lexer) lexNextSectionOrOptionFunc(section string) lexStep {
+ return func() (lexStep, error) {
+ r, _, err := l.buf.ReadRune()
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ }
+ return nil, err
+ }
+
+ if unicode.IsSpace(r) {
+ return l.lexNextSectionOrOptionFunc(section), nil
+ } else if r == '[' {
+ return l.lexSectionName, nil
+ } else if isComment(r) {
+ return l.ignoreLineFunc(l.lexNextSectionOrOptionFunc(section)), nil
+ }
+
+ l.buf.UnreadRune()
+ return l.lexOptionNameFunc(section), nil
+ }
+}
+
+func (l *lexer) lexOptionNameFunc(section string) lexStep {
+ return func() (lexStep, error) {
+ var partial bytes.Buffer
+ for {
+ r, _, err := l.buf.ReadRune()
+ if err != nil {
+ return nil, err
+ }
+
+ if r == '\n' || r == '\r' {
+ return nil, errors.New("unexpected newline encountered while parsing option name")
+ }
+
+ if r == '=' {
+ break
+ }
+
+ partial.WriteRune(r)
+ }
+
+ name := strings.TrimSpace(partial.String())
+ return l.lexOptionValueFunc(section, name, bytes.Buffer{}), nil
+ }
+}
+
+func (l *lexer) lexOptionValueFunc(section, name string, partial bytes.Buffer) lexStep {
+ return func() (lexStep, error) {
+ for {
+ line, eof, err := l.toEOL()
+ if err != nil {
+ return nil, err
+ }
+
+ if len(bytes.TrimSpace(line)) == 0 {
+ break
+ }
+
+ partial.Write(line)
+
+ // lack of continuation means this value has been exhausted
+ idx := bytes.LastIndex(line, []byte{'\\'})
+ if idx == -1 || idx != (len(line)-1) {
+ break
+ }
+
+ if !eof {
+ partial.WriteRune('\n')
+ }
+
+ return l.lexOptionValueFunc(section, name, partial), nil
+ }
+
+ val := partial.String()
+ if strings.HasSuffix(val, "\n") {
+ // A newline was added to the end, so the file didn't end with a backslash.
+ // => Keep the newline
+ val = strings.TrimSpace(val) + "\n"
+ } else {
+ val = strings.TrimSpace(val)
+ }
+ l.optchan <- &UnitOption{Section: section, Name: name, Value: val}
+
+ return l.lexNextSectionOrOptionFunc(section), nil
+ }
+}
+
+// toEOL reads until the end-of-line or end-of-file.
+// Returns (data, EOFfound, error)
+func (l *lexer) toEOL() ([]byte, bool, error) {
+ line, err := l.buf.ReadBytes('\n')
+ // ignore EOF here since it's roughly equivalent to EOL
+ if err != nil && err != io.EOF {
+ return nil, false, err
+ }
+
+ line = bytes.TrimSuffix(line, []byte{'\r'})
+ line = bytes.TrimSuffix(line, []byte{'\n'})
+
+ return line, err == io.EOF, nil
+}
+
+func isComment(r rune) bool {
+ return r == '#' || r == ';'
+}
diff --git a/vendor/github.com/coreos/go-systemd/unit/escape.go b/vendor/github.com/coreos/go-systemd/unit/escape.go
new file mode 100644
index 00000000..63b11726
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/unit/escape.go
@@ -0,0 +1,116 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Implements systemd-escape [--unescape] [--path]
+
+package unit
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+const (
+ allowed = `:_.abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789`
+)
+
+// If isPath is true:
+// We remove redundant '/'s, the leading '/', and trailing '/'.
+// If the result is empty, a '/' is inserted.
+//
+// We always:
+// Replace the following characters with `\x%x`:
+// Leading `.`
+// `-`, `\`, and anything not in this set: `:-_.\[0-9a-zA-Z]`
+// Replace '/' with '-'.
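+//
+// For example: escape("/dev/sda", true) yields "dev-sda", and
+// escape("foo-bar", false) yields `foo\x2dbar`.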
+func escape(unescaped string, isPath bool) string {
+ e := []byte{}
+ inSlashes := false
+ start := true
+ for i := 0; i < len(unescaped); i++ {
+ c := unescaped[i]
+ if isPath {
+ if c == '/' {
+ inSlashes = true
+ continue
+ } else if inSlashes {
+ inSlashes = false
+ if !start {
+ e = append(e, '-')
+ }
+ }
+ }
+
+ if c == '/' {
+ e = append(e, '-')
+ } else if start && c == '.' || strings.IndexByte(allowed, c) == -1 {
+ e = append(e, []byte(fmt.Sprintf(`\x%x`, c))...)
+ } else {
+ e = append(e, c)
+ }
+ start = false
+ }
+ if isPath && len(e) == 0 {
+ e = append(e, '-')
+ }
+ return string(e)
+}
+
+// If isPath is true:
+// We always return a string beginning with '/'.
+//
+// We always:
+// Replace '-' with '/'.
+// Replace `\x%x` with the value represented in hex.
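+//
+// For example: unescape("dev-sda", true) yields "/dev/sda", and
+// unescape(`foo\x2dbar`, false) yields "foo-bar".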
+func unescape(escaped string, isPath bool) string {
+ u := []byte{}
+ for i := 0; i < len(escaped); i++ {
+ c := escaped[i]
+ if c == '-' {
+ c = '/'
+ } else if c == '\\' && len(escaped)-i >= 4 && escaped[i+1] == 'x' {
+ n, err := strconv.ParseInt(escaped[i+2:i+4], 16, 8)
+ if err == nil {
+ c = byte(n)
+ i += 3
+ }
+ }
+ u = append(u, c)
+ }
+ if isPath && (len(u) == 0 || u[0] != '/') {
+ u = append([]byte("/"), u...)
+ }
+ return string(u)
+}
+
+// UnitNameEscape escapes a string as `systemd-escape` would
+func UnitNameEscape(unescaped string) string {
+ return escape(unescaped, false)
+}
+
+// UnitNameUnescape unescapes a string as `systemd-escape --unescape` would
+func UnitNameUnescape(escaped string) string {
+ return unescape(escaped, false)
+}
+
+// UnitNamePathEscape escapes a string as `systemd-escape --path` would
+func UnitNamePathEscape(unescaped string) string {
+ return escape(unescaped, true)
+}
+
+// UnitNamePathUnescape unescapes a string as `systemd-escape --path --unescape` would
+func UnitNamePathUnescape(escaped string) string {
+ return unescape(escaped, true)
+}
diff --git a/vendor/github.com/coreos/go-systemd/unit/option.go b/vendor/github.com/coreos/go-systemd/unit/option.go
new file mode 100644
index 00000000..e5d21e19
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/unit/option.go
@@ -0,0 +1,54 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package unit
+
+import (
+ "fmt"
+)
+
+type UnitOption struct {
+ Section string
+ Name string
+ Value string
+}
+
+func NewUnitOption(section, name, value string) *UnitOption {
+ return &UnitOption{Section: section, Name: name, Value: value}
+}
+
+func (uo *UnitOption) String() string {
+ return fmt.Sprintf("{Section: %q, Name: %q, Value: %q}", uo.Section, uo.Name, uo.Value)
+}
+
+func (uo *UnitOption) Match(other *UnitOption) bool {
+ return uo.Section == other.Section &&
+ uo.Name == other.Name &&
+ uo.Value == other.Value
+}
+
+func AllMatch(u1 []*UnitOption, u2 []*UnitOption) bool {
+ length := len(u1)
+ if length != len(u2) {
+ return false
+ }
+
+ for i := 0; i < length; i++ {
+ if !u1[i].Match(u2[i]) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/coreos/go-systemd/unit/serialize.go b/vendor/github.com/coreos/go-systemd/unit/serialize.go
new file mode 100644
index 00000000..e07799ca
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/unit/serialize.go
@@ -0,0 +1,75 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package unit
+
+import (
+ "bytes"
+ "io"
+)
+
+// Serialize encodes all of the given UnitOption objects into a
+// unit file. Options are written in the order supplied, grouped by
+// section; sections appear in the order they are first seen.
+func Serialize(opts []*UnitOption) io.Reader {
+ var buf bytes.Buffer
+
+ if len(opts) == 0 {
+ return &buf
+ }
+
+ // Index of sections -> ordered options
+ idx := map[string][]*UnitOption{}
+ // Separately preserve order in which sections were seen
+ sections := []string{}
+ for _, opt := range opts {
+ sec := opt.Section
+ if _, ok := idx[sec]; !ok {
+ sections = append(sections, sec)
+ }
+ idx[sec] = append(idx[sec], opt)
+ }
+
+ for i, sect := range sections {
+ writeSectionHeader(&buf, sect)
+ writeNewline(&buf)
+
+ opts := idx[sect]
+ for _, opt := range opts {
+ writeOption(&buf, opt)
+ writeNewline(&buf)
+ }
+ if i < len(sections)-1 {
+ writeNewline(&buf)
+ }
+ }
+
+ return &buf
+}
+
+func writeNewline(buf *bytes.Buffer) {
+ buf.WriteRune('\n')
+}
+
+func writeSectionHeader(buf *bytes.Buffer, section string) {
+ buf.WriteRune('[')
+ buf.WriteString(section)
+ buf.WriteRune(']')
+}
+
+func writeOption(buf *bytes.Buffer, opt *UnitOption) {
+ buf.WriteString(opt.Name)
+ buf.WriteRune('=')
+ buf.WriteString(opt.Value)
+}
diff --git a/vendor/github.com/coreos/ignition/LICENSE b/vendor/github.com/coreos/ignition/LICENSE
new file mode 100644
index 00000000..e06d2081
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/coreos/ignition/NOTICE b/vendor/github.com/coreos/ignition/NOTICE
new file mode 100644
index 00000000..e520005c
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2015 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/ignition/README.md b/vendor/github.com/coreos/ignition/README.md
new file mode 100644
index 00000000..1c96889d
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/README.md
@@ -0,0 +1,12 @@
+# Ignition #
+
+Ignition is the utility used by CoreOS Container Linux to manipulate disks during the initramfs. This includes partitioning disks, formatting partitions, writing files (regular files, systemd units, networkd units, etc.), and configuring users. On first boot, Ignition reads its configuration from a source of truth (remote URL, network metadata service, hypervisor bridge, etc.) and applies the configuration.
+
+## Usage ##
+
+Odds are good that you don't want to invoke Ignition directly. In fact, it isn't even present in the Container Linux root filesystem. Take a look at the [Getting Started Guide][getting started] for details on providing Ignition with a runtime configuration.
+
+Use the [bug tracker][issues] to report bugs.
+
+[getting started]: doc/getting-started.md
+[issues]: https://github.com/coreos/bugs/issues/new?labels=component/ignition
diff --git a/vendor/github.com/coreos/ignition/config/types/config.go b/vendor/github.com/coreos/ignition/config/types/config.go
new file mode 100644
index 00000000..dc1fe3b3
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/types/config.go
@@ -0,0 +1,80 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+
+ "github.com/coreos/go-semver/semver"
+
+ "github.com/coreos/ignition/config/validate/report"
+)
+
+var (
+ MaxVersion = semver.Version{
+ Major: 2,
+ Minor: 1,
+ PreRelease: "experimental",
+ }
+)
+
+func (c Config) Validate() report.Report {
+ r := report.Report{}
+ rules := []rule{
+ checkFilesFilesystems,
+ checkDuplicateFilesystems,
+ }
+
+ for _, rule := range rules {
+ rule(c, &r)
+ }
+ return r
+}
+
+type rule func(cfg Config, report *report.Report)
+
+func checkFilesFilesystems(cfg Config, r *report.Report) {
+ filesystems := map[string]struct{}{"root": {}}
+ for _, filesystem := range cfg.Storage.Filesystems {
+ filesystems[filesystem.Name] = struct{}{}
+ }
+ for _, file := range cfg.Storage.Files {
+ if file.Filesystem == "" {
+ // Filesystem was not specified. This is an error, but it's handled in types.File's Validate, not here.
+ continue
+ }
+ _, ok := filesystems[file.Filesystem]
+ if !ok {
+ r.Add(report.Entry{
+ Kind: report.EntryWarning,
+ Message: fmt.Sprintf("File %q references nonexistent filesystem %q. (This is ok if it is defined in a referenced config)",
+ file.Path, file.Filesystem),
+ })
+ }
+ }
+}
+
+func checkDuplicateFilesystems(cfg Config, r *report.Report) {
+ filesystems := map[string]struct{}{"root": {}}
+ for _, filesystem := range cfg.Storage.Filesystems {
+ if _, ok := filesystems[filesystem.Name]; ok {
+ r.Add(report.Entry{
+ Kind: report.EntryWarning,
+ Message: fmt.Sprintf("Filesystem %q shadows exising filesystem definition", filesystem.Name),
+ })
+ }
+ filesystems[filesystem.Name] = struct{}{}
+ }
+}
diff --git a/vendor/github.com/coreos/ignition/config/types/directory.go b/vendor/github.com/coreos/ignition/config/types/directory.go
new file mode 100644
index 00000000..16adad05
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/types/directory.go
@@ -0,0 +1,30 @@
+// Copyright 2017 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "github.com/coreos/ignition/config/validate/report"
+)
+
+func (d Directory) ValidateMode() report.Report {
+ r := report.Report{}
+ if err := validateMode(d.Mode); err != nil {
+ r.Add(report.Entry{
+ Message: err.Error(),
+ Kind: report.EntryError,
+ })
+ }
+ return r
+}
diff --git a/vendor/github.com/coreos/ignition/config/types/disk.go b/vendor/github.com/coreos/ignition/config/types/disk.go
new file mode 100644
index 00000000..21273a1c
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/types/disk.go
@@ -0,0 +1,126 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+
+ "github.com/coreos/ignition/config/validate/report"
+)
+
+func (n Disk) Validate() report.Report {
+ return report.Report{}
+}
+
+func (n Disk) ValidateDevice() report.Report {
+ if len(n.Device) == 0 {
+ return report.ReportFromError(fmt.Errorf("disk device is required"), report.EntryError)
+ }
+ if err := validatePath(string(n.Device)); err != nil {
+ return report.ReportFromError(err, report.EntryError)
+ }
+ return report.Report{}
+}
+
+func (n Disk) ValidatePartitions() report.Report {
+ r := report.Report{}
+ if n.partitionNumbersCollide() {
+ r.Add(report.Entry{
+ Message: fmt.Sprintf("disk %q: partition numbers collide", n.Device),
+ Kind: report.EntryError,
+ })
+ }
+ if n.partitionsOverlap() {
+ r.Add(report.Entry{
+ Message: fmt.Sprintf("disk %q: partitions overlap", n.Device),
+ Kind: report.EntryError,
+ })
+ }
+ if n.partitionsMisaligned() {
+ r.Add(report.Entry{
+ Message: fmt.Sprintf("disk %q: partitions misaligned", n.Device),
+ Kind: report.EntryError,
+ })
+ }
+ // Disks which have no errors at this point will likely succeed in sgdisk
+ return r
+}
+
+// partitionNumbersCollide returns true if partition numbers in n.Partitions are not unique.
+func (n Disk) partitionNumbersCollide() bool {
+ m := map[int][]Partition{}
+ for _, p := range n.Partitions {
+ m[p.Number] = append(m[p.Number], p)
+ }
+ for _, n := range m {
+ if len(n) > 1 {
+ // TODO(vc): return information describing the collision for logging
+ return true
+ }
+ }
+ return false
+}
+
+// end returns the last sector of a partition.
+func (p Partition) end() int {
+ if p.Size == 0 {
+ // a size of 0 means "fill available", just return the start as the end for those.
+ return p.Start
+ }
+ return p.Start + p.Size - 1
+}
+
+// partitionsOverlap returns true if any explicitly dimensioned partitions overlap
+func (n Disk) partitionsOverlap() bool {
+ for _, p := range n.Partitions {
+ // Starts of 0 are placed by sgdisk into the "largest available block" at that time.
+ // We aren't going to check those for overlap since we don't have the disk geometry.
+ if p.Start == 0 {
+ continue
+ }
+
+ for _, o := range n.Partitions {
+ if p == o || o.Start == 0 {
+ continue
+ }
+
+ // is p.Start within o?
+ if p.Start >= o.Start && p.Start <= o.end() {
+ return true
+ }
+
+ // is p.end() within o?
+ if p.end() >= o.Start && p.end() <= o.end() {
+ return true
+ }
+
+ // do p.Start and p.end() straddle o?
+ if p.Start < o.Start && p.end() > o.end() {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// partitionsMisaligned returns true if any of the partitions don't start on a 2048-sector (1MiB) boundary.
+func (n Disk) partitionsMisaligned() bool {
+ for _, p := range n.Partitions {
+ if (p.Start & (2048 - 1)) != 0 {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/coreos/ignition/config/types/file.go b/vendor/github.com/coreos/ignition/config/types/file.go
new file mode 100644
index 00000000..866fc054
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/types/file.go
@@ -0,0 +1,62 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/coreos/ignition/config/validate/report"
+)
+
+var (
+ ErrCompressionInvalid = errors.New("invalid compression method")
+)
+
+func (f File) ValidateMode() report.Report {
+ r := report.Report{}
+ if err := validateMode(f.Mode); err != nil {
+ r.Add(report.Entry{
+ Message: err.Error(),
+ Kind: report.EntryError,
+ })
+ }
+ return r
+}
+
+func (fc FileContents) ValidateCompression() report.Report {
+ r := report.Report{}
+ switch fc.Compression {
+ case "", "gzip":
+ default:
+ r.Add(report.Entry{
+ Message: ErrCompressionInvalid.Error(),
+ Kind: report.EntryError,
+ })
+ }
+ return r
+}
+
+func (fc FileContents) ValidateSource() report.Report {
+ r := report.Report{}
+ err := validateURL(fc.Source)
+ if err != nil {
+ r.Add(report.Entry{
+ Message: fmt.Sprintf("invalid url %q: %v", fc.Source, err),
+ Kind: report.EntryError,
+ })
+ }
+ return r
+}
diff --git a/vendor/github.com/coreos/ignition/config/types/filesystem.go b/vendor/github.com/coreos/ignition/config/types/filesystem.go
new file mode 100644
index 00000000..0b17121a
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/types/filesystem.go
@@ -0,0 +1,74 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/coreos/ignition/config/validate/report"
+)
+
+var (
+ ErrFilesystemInvalidFormat = errors.New("invalid filesystem format")
+ ErrFilesystemNoMountPath = errors.New("filesystem is missing mount or path")
+ ErrFilesystemMountAndPath = errors.New("filesystem has both mount and path defined")
+)
+
+func (f Filesystem) Validate() report.Report {
+ r := report.Report{}
+ if f.Mount == nil && f.Path == nil {
+ return report.ReportFromError(ErrFilesystemNoMountPath, report.EntryError)
+ }
+ if f.Mount != nil && f.Path != nil {
+ return report.ReportFromError(ErrFilesystemMountAndPath, report.EntryError)
+ }
+ return r
+}
+
+func (f Filesystem) ValidatePath() report.Report {
+ r := report.Report{}
+ if f.Path != nil && validatePath(*f.Path) != nil {
+ r.Add(report.Entry{
+ Message: fmt.Sprintf("filesystem %q: path not absolute", f.Name),
+ Kind: report.EntryError,
+ })
+ }
+ return r
+}
+
+func (m Mount) Validate() report.Report {
+ r := report.Report{}
+ switch m.Format {
+ case "ext4", "btrfs", "xfs", "swap":
+ default:
+ r.Add(report.Entry{
+ Message: ErrFilesystemInvalidFormat.Error(),
+ Kind: report.EntryError,
+ })
+ }
+ return r
+}
+
+func (m Mount) ValidateDevice() report.Report {
+ r := report.Report{}
+ if err := validatePath(m.Device); err != nil {
+ r.Add(report.Entry{
+ Message: err.Error(),
+ Kind: report.EntryError,
+ })
+ }
+ return r
+}
diff --git a/vendor/github.com/coreos/ignition/config/types/ignition.go b/vendor/github.com/coreos/ignition/config/types/ignition.go
new file mode 100644
index 00000000..661b898d
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/types/ignition.go
@@ -0,0 +1,59 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+
+ "github.com/coreos/go-semver/semver"
+
+ "github.com/coreos/ignition/config/validate/report"
+)
+
+var (
+ ErrOldVersion = errors.New("incorrect config version (too old)")
+ ErrNewVersion = errors.New("incorrect config version (too new)")
+ ErrInvalidVersion = errors.New("invalid config version (couldn't parse)")
+)
+
+func (c ConfigReference) ValidateSource() report.Report {
+ r := report.Report{}
+ err := validateURL(c.Source)
+ if err != nil {
+ r.Add(report.Entry{
+ Message: err.Error(),
+ Kind: report.EntryError,
+ })
+ }
+ return r
+}
+
+func (v Ignition) Semver() (*semver.Version, error) {
+ return semver.NewVersion(v.Version)
+}
+
+func (v Ignition) Validate() report.Report {
+ tv, err := v.Semver()
+ if err != nil {
+ return report.ReportFromError(ErrInvalidVersion, report.EntryError)
+ }
+ if MaxVersion.Major > tv.Major {
+ return report.ReportFromError(ErrOldVersion, report.EntryError)
+ }
+ if MaxVersion.LessThan(*tv) {
+ return report.ReportFromError(ErrNewVersion, report.EntryError)
+ }
+ return report.Report{}
+}
diff --git a/vendor/github.com/coreos/ignition/config/types/link.go b/vendor/github.com/coreos/ignition/config/types/link.go
new file mode 100644
index 00000000..1b7794c0
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/types/link.go
@@ -0,0 +1,35 @@
+// Copyright 2017 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+
+ "github.com/coreos/ignition/config/validate/report"
+)
+
+func (s Link) Validate() report.Report {
+ r := report.Report{}
+ if !s.Hard {
+ err := validatePath(s.Target)
+ if err != nil {
+ r.Add(report.Entry{
+ Message: fmt.Sprintf("problem with target path %q: %v", s.Target, err),
+ Kind: report.EntryError,
+ })
+ }
+ }
+ return r
+}
diff --git a/vendor/github.com/coreos/ignition/config/types/mode.go b/vendor/github.com/coreos/ignition/config/types/mode.go
new file mode 100644
index 00000000..09608b51
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/types/mode.go
@@ -0,0 +1,30 @@
+// Copyright 2017 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+)
+
+var (
+ ErrFileIllegalMode = errors.New("illegal file mode")
+)
+
+func validateMode(m int) error {
+ if m < 0 || m > 07777 {
+ return ErrFileIllegalMode
+ }
+ return nil
+}
diff --git a/vendor/github.com/coreos/ignition/config/types/node.go b/vendor/github.com/coreos/ignition/config/types/node.go
new file mode 100644
index 00000000..88ffca8a
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/types/node.go
@@ -0,0 +1,56 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "path/filepath"
+
+ "github.com/coreos/ignition/config/validate/report"
+)
+
+var (
+ ErrNoFilesystem = errors.New("no filesystem specified")
+)
+
+func (n Node) ValidateFilesystem() report.Report {
+ r := report.Report{}
+ if n.Filesystem == "" {
+ r.Add(report.Entry{
+ Message: ErrNoFilesystem.Error(),
+ Kind: report.EntryError,
+ })
+ }
+ return r
+}
+
+func (n Node) ValidatePath() report.Report {
+ r := report.Report{}
+ if err := validatePath(n.Path); err != nil {
+ r.Add(report.Entry{
+ Message: err.Error(),
+ Kind: report.EntryError,
+ })
+ }
+ return r
+}
+
+func (n Node) Depth() int {
+ count := 0
+ for p := filepath.Clean(string(n.Path)); p != "/"; count++ {
+ p = filepath.Dir(p)
+ }
+ return count
+}
diff --git a/vendor/github.com/coreos/ignition/config/types/partition.go b/vendor/github.com/coreos/ignition/config/types/partition.go
new file mode 100644
index 00000000..4473b667
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/types/partition.go
@@ -0,0 +1,73 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+
+ "github.com/coreos/ignition/config/validate/report"
+)
+
+const (
+ guidRegexStr = "^(|[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12})$"
+)
+
+var (
+ ErrLabelTooLong = errors.New("partition labels may not exceed 36 characters")
+ ErrDoesntMatchGUIDRegex = errors.New("doesn't match the form \"01234567-89AB-CDEF-EDCB-A98765432101\"")
+)
+
+func (p Partition) ValidateLabel() report.Report {
+ r := report.Report{}
+ // http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_entries:
+ // 56 (0x38) 72 bytes Partition name (36 UTF-16LE code units)
+
+ // XXX(vc): note GPT calls it a name, we're using label for consistency
+ // with udev naming /dev/disk/by-partlabel/*.
+ if len(p.Label) > 36 {
+ r.Add(report.Entry{
+ Message: ErrLabelTooLong.Error(),
+ Kind: report.EntryError,
+ })
+ }
+ return r
+}
+
+func (p Partition) ValidateTypeGUID() report.Report {
+ return validateGUID(p.TypeGUID)
+}
+
+func (p Partition) ValidateGUID() report.Report {
+ return validateGUID(p.GUID)
+}
+
+func validateGUID(guid string) report.Report {
+ r := report.Report{}
+ ok, err := regexp.MatchString(guidRegexStr, guid)
+ if err != nil {
+ r.Add(report.Entry{
+ Message: fmt.Sprintf("error matching guid regexp: %v", err),
+ Kind: report.EntryError,
+ })
+ } else if !ok {
+ r.Add(report.Entry{
+ Message: ErrDoesntMatchGUIDRegex.Error(),
+ Kind: report.EntryError,
+ })
+ }
+ return r
+}
diff --git a/vendor/github.com/coreos/ignition/config/types/path.go b/vendor/github.com/coreos/ignition/config/types/path.go
new file mode 100644
index 00000000..0bdbdcb0
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/types/path.go
@@ -0,0 +1,31 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "path"
+)
+
+var (
+ ErrPathRelative = errors.New("path not absolute")
+)
+
+func validatePath(p string) error {
+ if !path.IsAbs(p) {
+ return ErrPathRelative
+ }
+ return nil
+}
diff --git a/vendor/github.com/coreos/ignition/config/types/raid.go b/vendor/github.com/coreos/ignition/config/types/raid.go
new file mode 100644
index 00000000..4dffa296
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/types/raid.go
@@ -0,0 +1,58 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+
+ "github.com/coreos/ignition/config/validate/report"
+)
+
+func (n Raid) ValidateLevel() report.Report {
+ r := report.Report{}
+ switch n.Level {
+ case "linear", "raid0", "0", "stripe":
+ if n.Spares != 0 {
+ r.Add(report.Entry{
+ Message: fmt.Sprintf("spares unsupported for %q arrays", n.Level),
+ Kind: report.EntryError,
+ })
+ }
+ case "raid1", "1", "mirror":
+ case "raid4", "4":
+ case "raid5", "5":
+ case "raid6", "6":
+ case "raid10", "10":
+ default:
+ r.Add(report.Entry{
+ Message: fmt.Sprintf("unrecognized raid level: %q", n.Level),
+ Kind: report.EntryError,
+ })
+ }
+ return r
+}
+
+func (n Raid) ValidateDevices() report.Report {
+ r := report.Report{}
+	for _, d := range n.Devices {
+ if err := validatePath(string(d)); err != nil {
+ r.Add(report.Entry{
+ Message: fmt.Sprintf("array %q: device path not absolute: %q", n.Name, d),
+ Kind: report.EntryError,
+ })
+ }
+ }
+ return r
+}
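
The level switch above rejects spares only for stripe-family levels, since they carry no redundancy a spare could rebuild from. A stdlib-only sketch of that rule (the helper name is hypothetical):

```go
package main

import "fmt"

// sparesAllowed mirrors the ValidateLevel switch: stripe-family levels
// ("linear", "raid0", "0", "stripe") cannot use hot spares.
func sparesAllowed(level string) bool {
	switch level {
	case "linear", "raid0", "0", "stripe":
		return false
	}
	return true
}

func main() {
	fmt.Println(sparesAllowed("raid0"))  // false: spares would be an error
	fmt.Println(sparesAllowed("raid10")) // true
}
```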
diff --git a/vendor/github.com/coreos/ignition/config/types/schema.go b/vendor/github.com/coreos/ignition/config/types/schema.go
new file mode 100644
index 00000000..265ae7ce
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/types/schema.go
@@ -0,0 +1,200 @@
+package types
+
+// generated by "schematyper --package=types schema/ignition.json -o config/types/schema.go --root-type=Config" -- DO NOT EDIT
+
+type Config struct {
+ Ignition Ignition `json:"ignition"`
+ Networkd Networkd `json:"networkd,omitempty"`
+ Passwd Passwd `json:"passwd,omitempty"`
+ Storage Storage `json:"storage,omitempty"`
+ Systemd Systemd `json:"systemd,omitempty"`
+}
+
+type ConfigReference struct {
+ Source string `json:"source,omitempty"`
+ Verification Verification `json:"verification,omitempty"`
+}
+
+type Create struct {
+ Force bool `json:"force,omitempty"`
+ Options []Option `json:"options,omitempty"`
+}
+
+type Device string
+
+type Directory struct {
+ Node
+ DirectoryEmbedded1
+}
+
+type DirectoryEmbedded1 struct {
+ Mode int `json:"mode,omitempty"`
+}
+
+type Disk struct {
+ Device string `json:"device,omitempty"`
+ Partitions []Partition `json:"partitions,omitempty"`
+ WipeTable bool `json:"wipeTable,omitempty"`
+}
+
+type Dropin struct {
+ Contents string `json:"contents,omitempty"`
+ Name string `json:"name,omitempty"`
+}
+
+type File struct {
+ Node
+ FileEmbedded1
+}
+
+type FileContents struct {
+ Compression string `json:"compression,omitempty"`
+ Source string `json:"source,omitempty"`
+ Verification Verification `json:"verification,omitempty"`
+}
+
+type FileEmbedded1 struct {
+ Contents FileContents `json:"contents,omitempty"`
+ Mode int `json:"mode,omitempty"`
+}
+
+type Filesystem struct {
+ Mount *Mount `json:"mount,omitempty"`
+ Name string `json:"name,omitempty"`
+ Path *string `json:"path,omitempty"`
+}
+
+type Ignition struct {
+ Config IgnitionConfig `json:"config,omitempty"`
+ Timeouts Timeouts `json:"timeouts,omitempty"`
+ Version string `json:"version,omitempty"`
+}
+
+type IgnitionConfig struct {
+ Append []ConfigReference `json:"append,omitempty"`
+ Replace *ConfigReference `json:"replace,omitempty"`
+}
+
+type Link struct {
+ Node
+ LinkEmbedded1
+}
+
+type LinkEmbedded1 struct {
+ Hard bool `json:"hard,omitempty"`
+ Target string `json:"target,omitempty"`
+}
+
+type Mount struct {
+ Create *Create `json:"create,omitempty"`
+ Device string `json:"device,omitempty"`
+ Format string `json:"format,omitempty"`
+}
+
+type Networkd struct {
+ Units []Networkdunit `json:"units,omitempty"`
+}
+
+type Networkdunit struct {
+ Contents string `json:"contents,omitempty"`
+ Name string `json:"name,omitempty"`
+}
+
+type Node struct {
+ Filesystem string `json:"filesystem,omitempty"`
+ Group NodeGroup `json:"group,omitempty"`
+ Path string `json:"path,omitempty"`
+ User NodeUser `json:"user,omitempty"`
+}
+
+type NodeGroup struct {
+ ID int `json:"id,omitempty"`
+}
+
+type NodeUser struct {
+ ID int `json:"id,omitempty"`
+}
+
+type Option string
+
+type Partition struct {
+ GUID string `json:"guid,omitempty"`
+ Label string `json:"label,omitempty"`
+ Number int `json:"number,omitempty"`
+ Size int `json:"size,omitempty"`
+ Start int `json:"start,omitempty"`
+ TypeGUID string `json:"typeGuid,omitempty"`
+}
+
+type Passwd struct {
+ Groups []PasswdGroup `json:"groups,omitempty"`
+ Users []PasswdUser `json:"users,omitempty"`
+}
+
+type PasswdGroup struct {
+ Gid *int `json:"gid,omitempty"`
+ Name string `json:"name,omitempty"`
+ PasswordHash string `json:"passwordHash,omitempty"`
+ System bool `json:"system,omitempty"`
+}
+
+type PasswdUser struct {
+ Create *Usercreate `json:"create,omitempty"`
+ Name string `json:"name,omitempty"`
+ PasswordHash string `json:"passwordHash,omitempty"`
+ SSHAuthorizedKeys []SSHAuthorizedKey `json:"sshAuthorizedKeys,omitempty"`
+}
+
+type Raid struct {
+ Devices []Device `json:"devices,omitempty"`
+ Level string `json:"level,omitempty"`
+ Name string `json:"name,omitempty"`
+ Spares int `json:"spares,omitempty"`
+}
+
+type SSHAuthorizedKey string
+
+type Storage struct {
+ Directories []Directory `json:"directories,omitempty"`
+ Disks []Disk `json:"disks,omitempty"`
+ Files []File `json:"files,omitempty"`
+ Filesystems []Filesystem `json:"filesystems,omitempty"`
+ Links []Link `json:"links,omitempty"`
+ Raid []Raid `json:"raid,omitempty"`
+}
+
+type Systemd struct {
+ Units []Unit `json:"units,omitempty"`
+}
+
+type Timeouts struct {
+ HTTPResponseHeaders *int `json:"httpResponseHeaders,omitempty"`
+ HTTPTotal *int `json:"httpTotal,omitempty"`
+}
+
+type Unit struct {
+ Contents string `json:"contents,omitempty"`
+ Dropins []Dropin `json:"dropins,omitempty"`
+ Enable bool `json:"enable,omitempty"`
+ Mask bool `json:"mask,omitempty"`
+ Name string `json:"name,omitempty"`
+}
+
+type Usercreate struct {
+ Gecos string `json:"gecos,omitempty"`
+ Groups []UsercreateGroup `json:"groups,omitempty"`
+ HomeDir string `json:"homeDir,omitempty"`
+ NoCreateHome bool `json:"noCreateHome,omitempty"`
+ NoLogInit bool `json:"noLogInit,omitempty"`
+ NoUserGroup bool `json:"noUserGroup,omitempty"`
+ PrimaryGroup string `json:"primaryGroup,omitempty"`
+ Shell string `json:"shell,omitempty"`
+ System bool `json:"system,omitempty"`
+ UID *int `json:"uid,omitempty"`
+}
+
+type UsercreateGroup string
+
+type Verification struct {
+ Hash *string `json:"hash,omitempty"`
+}
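
Since every generated type carries plain `json` tags, a config parses with nothing but `encoding/json`. A trimmed-down sketch (the struct subset here is illustrative; the full set is in schema.go above):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Minimal copies of the generated types, just enough to read
// the ignition.version field from a config document.
type Ignition struct {
	Version string `json:"version,omitempty"`
}

type Config struct {
	Ignition Ignition `json:"ignition"`
}

func main() {
	raw := []byte(`{"ignition": {"version": "2.1.0"}}`)
	var cfg Config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Ignition.Version) // 2.1.0
}
```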
diff --git a/vendor/github.com/coreos/ignition/config/types/unit.go b/vendor/github.com/coreos/ignition/config/types/unit.go
new file mode 100644
index 00000000..3d3fc8db
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/types/unit.go
@@ -0,0 +1,109 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "path"
+
+ "github.com/coreos/go-systemd/unit"
+
+ "github.com/coreos/ignition/config/validate/report"
+)
+
+var (
+ ErrInvalidSystemdExt = errors.New("invalid systemd unit extension")
+ ErrInvalidNetworkdExt = errors.New("invalid networkd unit extension")
+)
+
+func (u Unit) ValidateContents() report.Report {
+ r := report.Report{}
+ if err := validateUnitContent(u.Contents); err != nil {
+ r.Add(report.Entry{
+ Message: err.Error(),
+ Kind: report.EntryError,
+ })
+ }
+ return r
+}
+
+func (u Unit) ValidateName() report.Report {
+ r := report.Report{}
+ switch path.Ext(u.Name) {
+ case ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice", ".scope":
+ default:
+ r.Add(report.Entry{
+ Message: ErrInvalidSystemdExt.Error(),
+ Kind: report.EntryError,
+ })
+ }
+ return r
+}
+
+func (d Dropin) Validate() report.Report {
+ r := report.Report{}
+
+ if err := validateUnitContent(d.Contents); err != nil {
+ r.Add(report.Entry{
+ Message: err.Error(),
+ Kind: report.EntryError,
+ })
+ }
+
+ switch path.Ext(d.Name) {
+ case ".conf":
+ default:
+ r.Add(report.Entry{
+ Message: fmt.Sprintf("invalid systemd unit drop-in extension: %q", path.Ext(d.Name)),
+ Kind: report.EntryError,
+ })
+ }
+
+ return r
+}
+
+func (u Networkdunit) Validate() report.Report {
+ r := report.Report{}
+
+ if err := validateUnitContent(u.Contents); err != nil {
+ r.Add(report.Entry{
+ Message: err.Error(),
+ Kind: report.EntryError,
+ })
+ }
+
+ switch path.Ext(u.Name) {
+ case ".link", ".netdev", ".network":
+ default:
+ r.Add(report.Entry{
+ Message: ErrInvalidNetworkdExt.Error(),
+ Kind: report.EntryError,
+ })
+ }
+
+ return r
+}
+
+func validateUnitContent(content string) error {
+ c := bytes.NewBufferString(content)
+ _, err := unit.Deserialize(c)
+ if err != nil {
+ return fmt.Errorf("invalid unit content: %s", err)
+ }
+
+ return nil
+}
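
All three name validators above key off `path.Ext`; a stdlib-only illustration (the example unit names are hypothetical):

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	fmt.Println(path.Ext("etcd-member.service")) // ".service": valid systemd unit
	fmt.Println(path.Ext("10-override.conf"))    // ".conf":    valid drop-in
	fmt.Println(path.Ext("00-eth0.network"))     // ".network": valid networkd unit
	fmt.Println(path.Ext("bare-name"))           // "":         rejected by all three
}
```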
diff --git a/vendor/github.com/coreos/ignition/config/types/url.go b/vendor/github.com/coreos/ignition/config/types/url.go
new file mode 100644
index 00000000..e80bf939
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/types/url.go
@@ -0,0 +1,49 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "net/url"
+
+ "github.com/vincent-petithory/dataurl"
+)
+
+var (
+ ErrInvalidScheme = errors.New("invalid url scheme")
+)
+
+func validateURL(s string) error {
+	// An empty URL is valid; it indicates an empty file
+ if s == "" {
+ return nil
+ }
+ u, err := url.Parse(s)
+ if err != nil {
+ return err
+ }
+
+ switch u.Scheme {
+ case "http", "https", "oem":
+ return nil
+ case "data":
+ if _, err := dataurl.DecodeString(s); err != nil {
+ return err
+ }
+ return nil
+ default:
+ return ErrInvalidScheme
+ }
+}
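
A stdlib-only sketch of the scheme check (the helper name is hypothetical, and it omits the data-URL payload validation, which needs the vendored dataurl package):

```go
package main

import (
	"fmt"
	"net/url"
)

// schemeAccepted mirrors validateURL's switch, minus decoding
// the payload of data: URLs.
func schemeAccepted(s string) bool {
	if s == "" {
		return true // an empty source means an empty file
	}
	u, err := url.Parse(s)
	if err != nil {
		return false
	}
	switch u.Scheme {
	case "http", "https", "oem", "data":
		return true
	}
	return false
}

func main() {
	fmt.Println(schemeAccepted("https://example.com/ignition.json")) // true
	fmt.Println(schemeAccepted("ftp://example.com/file"))            // false
}
```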
diff --git a/vendor/github.com/coreos/ignition/config/types/verification.go b/vendor/github.com/coreos/ignition/config/types/verification.go
new file mode 100644
index 00000000..d2158b80
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/types/verification.go
@@ -0,0 +1,83 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "crypto"
+ "encoding/hex"
+ "errors"
+ "strings"
+
+ "github.com/coreos/ignition/config/validate/report"
+)
+
+var (
+ ErrHashMalformed = errors.New("malformed hash specifier")
+ ErrHashWrongSize = errors.New("incorrect size for hash sum")
+ ErrHashUnrecognized = errors.New("unrecognized hash function")
+)
+
+// HashParts returns the function and sum (in that order) of the hash stored
+// in this Verification, or an error if there is an issue during parsing.
+func (v Verification) HashParts() (string, string, error) {
+ if v.Hash == nil {
+ // The hash can be nil
+ return "", "", nil
+ }
+ parts := strings.SplitN(*v.Hash, "-", 2)
+ if len(parts) != 2 {
+ return "", "", ErrHashMalformed
+ }
+
+ return parts[0], parts[1], nil
+}
+
+func (v Verification) Validate() report.Report {
+ r := report.Report{}
+
+ if v.Hash == nil {
+ // The hash can be nil
+ return r
+ }
+
+ function, sum, err := v.HashParts()
+ if err != nil {
+ r.Add(report.Entry{
+ Message: err.Error(),
+ Kind: report.EntryError,
+ })
+ return r
+ }
+ var hash crypto.Hash
+ switch function {
+ case "sha512":
+ hash = crypto.SHA512
+ default:
+ r.Add(report.Entry{
+ Message: ErrHashUnrecognized.Error(),
+ Kind: report.EntryError,
+ })
+ return r
+ }
+
+ if len(sum) != hex.EncodedLen(hash.Size()) {
+ r.Add(report.Entry{
+ Message: ErrHashWrongSize.Error(),
+ Kind: report.EntryError,
+ })
+ }
+
+ return r
+}
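
The `"<function>-<sum>"` format and the length check can be exercised with the standard library alone (hypothetical sketch):

```go
package main

import (
	"crypto"
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	// A well-formed verification spec: "<function>-<hex sum>".
	spec := "sha512-" + strings.Repeat("ab", 64) // 128 hex characters

	parts := strings.SplitN(spec, "-", 2)
	function, sum := parts[0], parts[1]

	// crypto.Hash.Size does not require the hash to be linked into the binary.
	wantLen := hex.EncodedLen(crypto.SHA512.Size()) // 128

	fmt.Println(function == "sha512" && len(sum) == wantLen) // true
}
```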
diff --git a/vendor/github.com/coreos/ignition/config/validate/report/report.go b/vendor/github.com/coreos/ignition/config/validate/report/report.go
new file mode 100644
index 00000000..e0d4fed8
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/config/validate/report/report.go
@@ -0,0 +1,158 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package report
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "sort"
+)
+
+type Report struct {
+ Entries []Entry
+}
+
+func (into *Report) Merge(from Report) {
+ into.Entries = append(into.Entries, from.Entries...)
+}
+
+func ReportFromError(err error, severity entryKind) Report {
+ if err == nil {
+ return Report{}
+ }
+ return Report{
+ Entries: []Entry{
+ {
+ Kind: severity,
+ Message: err.Error(),
+ },
+ },
+ }
+}
+
+// Sort sorts the entries by line number, then column number
+func (r *Report) Sort() {
+ sort.Sort(entries(r.Entries))
+}
+
+type entries []Entry
+
+func (e entries) Len() int {
+ return len(e)
+}
+
+func (e entries) Swap(i, j int) {
+ e[i], e[j] = e[j], e[i]
+}
+
+func (e entries) Less(i, j int) bool {
+ if e[i].Line != e[j].Line {
+ return e[i].Line < e[j].Line
+ }
+ return e[i].Column < e[j].Column
+}
+
+const (
+ EntryError entryKind = iota
+ EntryWarning
+ EntryInfo
+ EntryDeprecated
+)
+
+// AddPosition updates all entries with Line equal to 0, setting their Line/Column fields to line/column. This is useful
+// when a type has a custom unmarshaller and thus can't determine the exact offset of an error within the type. In that case
+// the offset of the entire chunk of JSON that was unmarshalled into the type can be used instead, which is still quite close.
+func (r *Report) AddPosition(line, col int, highlight string) {
+ for i, e := range r.Entries {
+ if e.Line == 0 {
+ r.Entries[i].Line = line
+ r.Entries[i].Column = col
+ r.Entries[i].Highlight = highlight
+ }
+ }
+}
+
+func (r *Report) Add(e Entry) {
+ r.Entries = append(r.Entries, e)
+}
+
+func (r Report) String() string {
+ var errs bytes.Buffer
+ for i, entry := range r.Entries {
+ if i != 0 {
+ // Only add line breaks on multiline reports
+ errs.WriteString("\n")
+ }
+ errs.WriteString(entry.String())
+ }
+ return errs.String()
+}
+
+// IsFatal reports whether the report contains any errors that make the config invalid
+func (r Report) IsFatal() bool {
+ for _, entry := range r.Entries {
+ if entry.Kind == EntryError {
+ return true
+ }
+ }
+ return false
+}
+
+// IsDeprecated reports whether the report contains any deprecation entries
+func (r Report) IsDeprecated() bool {
+ for _, entry := range r.Entries {
+ if entry.Kind == EntryDeprecated {
+ return true
+ }
+ }
+ return false
+}
+
+type Entry struct {
+ Kind entryKind `json:"kind"`
+ Message string `json:"message"`
+ Line int `json:"line,omitempty"`
+ Column int `json:"column,omitempty"`
+ Highlight string `json:"-"`
+}
+
+func (e Entry) String() string {
+ if e.Line != 0 {
+ return fmt.Sprintf("%s at line %d, column %d\n%s%v", e.Kind.String(), e.Line, e.Column, e.Highlight, e.Message)
+ }
+ return fmt.Sprintf("%s: %v", e.Kind.String(), e.Message)
+}
+
+type entryKind int
+
+func (e entryKind) String() string {
+ switch e {
+ case EntryError:
+ return "error"
+ case EntryWarning:
+ return "warning"
+ case EntryInfo:
+ return "info"
+ case EntryDeprecated:
+ return "deprecated"
+ default:
+ return "unknown error"
+ }
+}
+
+func (e entryKind) MarshalJSON() ([]byte, error) {
+ return json.Marshal(e.String())
+}
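
A small usage sketch, assuming the vendored report package above resolves on the import path:

```go
package main

import (
	"fmt"

	"github.com/coreos/ignition/config/validate/report"
)

func main() {
	r := report.Report{}
	r.Add(report.Entry{Message: "invalid disk device", Kind: report.EntryError})
	r.Add(report.Entry{Message: "field is deprecated", Kind: report.EntryDeprecated})

	fmt.Println(r.IsFatal())      // true: at least one EntryError
	fmt.Println(r.IsDeprecated()) // true
	fmt.Println(r.String())       // one line per entry
}
```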
diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/device_alias.go b/vendor/github.com/coreos/ignition/internal/exec/util/device_alias.go
new file mode 100644
index 00000000..0b4b4474
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/internal/exec/util/device_alias.go
@@ -0,0 +1,55 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "os"
+ "path/filepath"
+)
+
+const deviceAliasDir = "/dev_aliases"
+
+// DeviceAlias returns the aliased form of the supplied path.
+// Note device paths in ignition are always absolute.
+func DeviceAlias(path string) string {
+ return filepath.Join(deviceAliasDir, filepath.Clean(path))
+}
+
+// CreateDeviceAlias creates a device alias for the supplied path.
+// On success the canonicalized path used as the alias target is returned.
+func CreateDeviceAlias(path string) (string, error) {
+ target, err := filepath.EvalSymlinks(path)
+ if err != nil {
+ return "", err
+ }
+
+ alias := DeviceAlias(path)
+
+ if err := os.Remove(alias); err != nil {
+ if !os.IsNotExist(err) {
+ return "", err
+ }
+
+ if err = os.MkdirAll(filepath.Dir(alias), 0750); err != nil {
+ return "", err
+ }
+ }
+
+ if err = os.Symlink(target, alias); err != nil {
+ return "", err
+ }
+
+ return target, nil
+}
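
`DeviceAlias` is a pure path computation, so its behavior is easy to pin down (stdlib-only sketch):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Equivalent to DeviceAlias("/dev/disk/by-label/ROOT"):
	alias := filepath.Join("/dev_aliases", filepath.Clean("/dev/disk/by-label/ROOT"))
	fmt.Println(alias) // /dev_aliases/dev/disk/by-label/ROOT
}
```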
diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/file.go b/vendor/github.com/coreos/ignition/internal/exec/util/file.go
new file mode 100644
index 00000000..b51a9dee
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/internal/exec/util/file.go
@@ -0,0 +1,237 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "bufio"
+ "compress/gzip"
+ "encoding/hex"
+ "hash"
+ "io"
+ "io/ioutil"
+ "net/url"
+ "os"
+ "path/filepath"
+
+ "github.com/coreos/ignition/config/types"
+ "github.com/coreos/ignition/internal/log"
+ "github.com/coreos/ignition/internal/resource"
+
+ "golang.org/x/net/context"
+)
+
+const (
+ DefaultDirectoryPermissions os.FileMode = 0755
+ DefaultFilePermissions os.FileMode = 0644
+)
+
+type File struct {
+ io.ReadCloser
+ hash.Hash
+ Path string
+ Mode os.FileMode
+ Uid int
+ Gid int
+ expectedSum string
+}
+
+func (f File) Verify() error {
+ if f.Hash == nil {
+ return nil
+ }
+ sum := f.Sum(nil)
+ encodedSum := make([]byte, hex.EncodedLen(len(sum)))
+ hex.Encode(encodedSum, sum)
+
+ if string(encodedSum) != f.expectedSum {
+ return ErrHashMismatch{
+ Calculated: string(encodedSum),
+ Expected: f.expectedSum,
+ }
+ }
+ return nil
+}
+
+// newHashedReader returns a new ReadCloser that also writes to the provided hash.
+func newHashedReader(reader io.ReadCloser, hasher hash.Hash) io.ReadCloser {
+ return struct {
+ io.Reader
+ io.Closer
+ }{
+ Reader: io.TeeReader(reader, hasher),
+ Closer: reader,
+ }
+}
+
+// RenderFile returns a *File with a Reader that downloads, hashes, and decompresses the incoming data.
+// It returns nil if f had invalid options. Errors reading/verifying/decompressing the file will
+// present themselves when the Reader is actually read from.
+func RenderFile(l *log.Logger, c *resource.HttpClient, f types.File) *File {
+ var reader io.ReadCloser
+ var err error
+ var expectedSum string
+
+ // explicitly ignoring the error here because the config should already be
+ // validated by this point
+ u, _ := url.Parse(f.Contents.Source)
+
+ reader, err = resource.FetchAsReader(l, c, context.Background(), *u)
+ if err != nil {
+ l.Crit("Error fetching file %q: %v", f.Path, err)
+ return nil
+ }
+
+ fileHash, err := GetHasher(f.Contents.Verification)
+ if err != nil {
+ l.Crit("Error verifying file %q: %v", f.Path, err)
+ return nil
+ }
+
+ if fileHash != nil {
+ reader = newHashedReader(reader, fileHash)
+ // explicitly ignoring the error here because the config should already
+ // be validated by this point
+ _, expectedSum, _ = f.Contents.Verification.HashParts()
+ }
+
+ reader, err = decompressFileStream(l, f, reader)
+ if err != nil {
+ l.Crit("Error decompressing file %q: %v", f.Path, err)
+ return nil
+ }
+
+ return &File{
+ Path: f.Path,
+ ReadCloser: reader,
+ Hash: fileHash,
+ Mode: os.FileMode(f.Mode),
+ Uid: f.User.ID,
+ Gid: f.Group.ID,
+ expectedSum: expectedSum,
+ }
+}
+
+// gzipReader is a wrapper for gzip's reader that closes the stream it wraps as well
+// as itself when Close() is called.
+type gzipReader struct {
+	*gzip.Reader // actually a ReadCloser
+ source io.Closer
+}
+
+func newGzipReader(reader io.ReadCloser) (io.ReadCloser, error) {
+ gzReader, err := gzip.NewReader(reader)
+ if err != nil {
+ return nil, err
+ }
+ return gzipReader{
+ Reader: gzReader,
+ source: reader,
+ }, nil
+}
+
+func (gz gzipReader) Close() error {
+ if err := gz.Reader.Close(); err != nil {
+ return err
+ }
+ if err := gz.source.Close(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func decompressFileStream(l *log.Logger, f types.File, contents io.ReadCloser) (io.ReadCloser, error) {
+ switch f.Contents.Compression {
+ case "":
+ return contents, nil
+ case "gzip":
+ return newGzipReader(contents)
+ default:
+ return nil, types.ErrCompressionInvalid
+ }
+}
+
+func (u Util) WriteLink(s types.Link) error {
+ path := u.JoinPath(s.Path)
+
+ if err := MkdirForFile(path); err != nil {
+ return err
+ }
+
+ if s.Hard {
+ return os.Link(s.Target, path)
+ }
+ return os.Symlink(s.Target, path)
+}
+
+// WriteFile creates and writes the file described by f, staging the contents in a temporary file before renaming it into place.
+func (u Util) WriteFile(f *File) error {
+ defer f.Close()
+ var err error
+
+ path := u.JoinPath(string(f.Path))
+
+ if err := MkdirForFile(path); err != nil {
+ return err
+ }
+
+ // Create a temporary file in the same directory to ensure it's on the same filesystem
+ var tmp *os.File
+ if tmp, err = ioutil.TempFile(filepath.Dir(path), "tmp"); err != nil {
+ return err
+ }
+
+ defer func() {
+ tmp.Close()
+ if err != nil {
+ os.Remove(tmp.Name())
+ }
+ }()
+
+ fileWriter := bufio.NewWriter(tmp)
+
+ if _, err = io.Copy(fileWriter, f); err != nil {
+ return err
+ }
+	if err = fileWriter.Flush(); err != nil {
+		return err
+	}
+
+ if err = f.Verify(); err != nil {
+ return err
+ }
+
+	// XXX(vc): Note that we assume we are operating on the file we just wrote; this is only guaranteed
+	// by using syscall.Fchown() and syscall.Fchmod().
+
+ // Ensure the ownership and mode are as requested (since WriteFile can be affected by sticky bit)
+ if err = os.Chown(tmp.Name(), f.Uid, f.Gid); err != nil {
+ return err
+ }
+
+ if err = os.Chmod(tmp.Name(), f.Mode); err != nil {
+ return err
+ }
+
+ if err = os.Rename(tmp.Name(), path); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// MkdirForFile helper creates the directory components of path.
+func MkdirForFile(path string) error {
+ return os.MkdirAll(filepath.Dir(path), DefaultDirectoryPermissions)
+}
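
The hashing wrapper above relies on `io.TeeReader` to feed the hash as the stream is consumed; a stdlib-only sketch of the same trick:

```go
package main

import (
	"crypto/sha512"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	hasher := sha512.New()
	// Every byte read through r is also written into hasher.
	r := io.TeeReader(strings.NewReader("hello"), hasher)

	body, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))                            // hello
	fmt.Println(hex.EncodeToString(hasher.Sum(nil))[:8]) // leading hex digits of the SHA-512
}
```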
diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/passwd.go b/vendor/github.com/coreos/ignition/internal/exec/util/passwd.go
new file mode 100644
index 00000000..4252de35
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/internal/exec/util/passwd.go
@@ -0,0 +1,191 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "fmt"
+ "os/exec"
+ "strconv"
+ "strings"
+
+ "github.com/coreos/ignition/config/types"
+
+ keys "github.com/coreos/update-ssh-keys/authorized_keys_d"
+)
+
+// CreateUser creates the user as described.
+func (u Util) CreateUser(c types.PasswdUser) error {
+ if c.Create == nil {
+ return nil
+ }
+
+ cu := c.Create
+ args := []string{"--root", u.DestDir}
+
+ if c.PasswordHash != "" {
+ args = append(args, "--password", c.PasswordHash)
+ } else {
+ args = append(args, "--password", "*")
+ }
+
+ if cu.UID != nil {
+ args = append(args, "--uid",
+ strconv.FormatUint(uint64(*cu.UID), 10))
+ }
+
+ if cu.Gecos != "" {
+ args = append(args, "--comment", fmt.Sprintf("%q", cu.Gecos))
+ }
+
+ if cu.HomeDir != "" {
+ args = append(args, "--home-dir", cu.HomeDir)
+ }
+
+ if cu.NoCreateHome {
+ args = append(args, "--no-create-home")
+ } else {
+ args = append(args, "--create-home")
+ }
+
+ if cu.PrimaryGroup != "" {
+ args = append(args, "--gid", cu.PrimaryGroup)
+ }
+
+ if len(cu.Groups) > 0 {
+ args = append(args, "--groups", strings.Join(translateV2_1UsercreateGroupSliceToStringSlice(cu.Groups), ","))
+ }
+
+ if cu.NoUserGroup {
+ args = append(args, "--no-user-group")
+ }
+
+ if cu.System {
+ args = append(args, "--system")
+ }
+
+ if cu.NoLogInit {
+ args = append(args, "--no-log-init")
+ }
+
+ if cu.Shell != "" {
+ args = append(args, "--shell", cu.Shell)
+ }
+
+ args = append(args, c.Name)
+
+ return u.LogCmd(exec.Command("useradd", args...),
+ "creating user %q", c.Name)
+}
+
+// golang--
+func translateV2_1UsercreateGroupSliceToStringSlice(groups []types.UsercreateGroup) []string {
+ newGroups := make([]string, len(groups))
+ for i, g := range groups {
+ newGroups[i] = string(g)
+ }
+ return newGroups
+}
+
+// AuthorizeSSHKeys adds the provided SSH public keys to the user's authorized keys.
+func (u Util) AuthorizeSSHKeys(c types.PasswdUser) error {
+ if len(c.SSHAuthorizedKeys) == 0 {
+ return nil
+ }
+
+ return u.LogOp(func() error {
+ usr, err := u.userLookup(c.Name)
+ if err != nil {
+ return fmt.Errorf("unable to lookup user %q", c.Name)
+ }
+
+ akd, err := keys.Open(usr, true)
+ if err != nil {
+ return err
+ }
+ defer akd.Close()
+
+ // TODO(vc): introduce key names to config?
+ // TODO(vc): validate c.SSHAuthorizedKeys well-formedness.
+ ks := strings.Join(translateV2_1SSHAuthorizedKeySliceToStringSlice(c.SSHAuthorizedKeys), "\n")
+ // XXX(vc): for now ensure the addition is always
+ // newline-terminated. A future version of akd will handle this
+ // for us in addition to validating the ssh keys for
+ // well-formedness.
+ if !strings.HasSuffix(ks, "\n") {
+ ks = ks + "\n"
+ }
+
+ if err := akd.Add("coreos-ignition", []byte(ks), true, true); err != nil {
+ return err
+ }
+
+ if err := akd.Sync(); err != nil {
+ return err
+ }
+
+ return nil
+ }, "adding ssh keys to user %q", c.Name)
+}
+
+// golang--
+func translateV2_1SSHAuthorizedKeySliceToStringSlice(keys []types.SSHAuthorizedKey) []string {
+ newKeys := make([]string, len(keys))
+ for i, k := range keys {
+ newKeys[i] = string(k)
+ }
+ return newKeys
+}
+
+// SetPasswordHash sets the password hash of the specified user.
+func (u Util) SetPasswordHash(c types.PasswdUser) error {
+ if c.PasswordHash == "" {
+ return nil
+ }
+
+ args := []string{
+ "--root", u.DestDir,
+ "--password", c.PasswordHash,
+ }
+
+ args = append(args, c.Name)
+
+ return u.LogCmd(exec.Command("usermod", args...),
+ "setting password for %q", c.Name)
+}
+
+// CreateGroup creates the group as described.
+func (u Util) CreateGroup(g types.PasswdGroup) error {
+ args := []string{"--root", u.DestDir}
+
+ if g.Gid != nil {
+ args = append(args, "--gid",
+ strconv.FormatUint(uint64(*g.Gid), 10))
+ }
+
+ if g.PasswordHash != "" {
+ args = append(args, "--password", g.PasswordHash)
+ } else {
+ args = append(args, "--password", "*")
+ }
+
+ if g.System {
+ args = append(args, "--system")
+ }
+
+ args = append(args, g.Name)
+
+ return u.LogCmd(exec.Command("groupadd", args...),
+ "adding group %q", g.Name)
+}
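
The user and group helpers above all reduce to assembling flag lists for the shadow-utils commands; a sketch of the pattern (the values are hypothetical):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	uid := 500
	args := []string{"--root", "/sysroot", "--password", "*"}
	args = append(args, "--uid", strconv.FormatUint(uint64(uid), 10))
	args = append(args, "--create-home", "core")

	// CreateUser would hand this to exec.Command("useradd", args...).
	fmt.Println(args)
}
```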
diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/path.go b/vendor/github.com/coreos/ignition/internal/exec/util/path.go
new file mode 100644
index 00000000..ec68dd96
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/internal/exec/util/path.go
@@ -0,0 +1,31 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "path/filepath"
+)
+
+func SystemdUnitsPath() string {
+ return filepath.Join("etc", "systemd", "system")
+}
+
+func NetworkdUnitsPath() string {
+ return filepath.Join("etc", "systemd", "network")
+}
+
+func SystemdDropinsPath(unitName string) string {
+ return filepath.Join("etc", "systemd", "system", unitName+".d")
+}
diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/unit.go b/vendor/github.com/coreos/ignition/internal/exec/util/unit.go
new file mode 100644
index 00000000..0f383bbb
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/internal/exec/util/unit.go
@@ -0,0 +1,80 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/coreos/ignition/config/types"
+)
+
+const (
+ presetPath string = "/etc/systemd/system-preset/20-ignition.preset"
+ DefaultPresetPermissions os.FileMode = 0644
+)
+
+func FileFromSystemdUnit(unit types.Unit) *File {
+ return &File{
+ Path: filepath.Join(SystemdUnitsPath(), string(unit.Name)),
+ ReadCloser: ioutil.NopCloser(bytes.NewReader([]byte(unit.Contents))),
+ Mode: DefaultFilePermissions,
+ }
+}
+
+func FileFromNetworkdUnit(unit types.Networkdunit) *File {
+ return &File{
+ Path: filepath.Join(NetworkdUnitsPath(), string(unit.Name)),
+ ReadCloser: ioutil.NopCloser(bytes.NewReader([]byte(unit.Contents))),
+ Mode: DefaultFilePermissions,
+ }
+}
+
+func FileFromUnitDropin(unit types.Unit, dropin types.Dropin) *File {
+ return &File{
+ Path: filepath.Join(SystemdDropinsPath(string(unit.Name)), string(dropin.Name)),
+ ReadCloser: ioutil.NopCloser(bytes.NewReader([]byte(dropin.Contents))),
+ Mode: DefaultFilePermissions,
+ }
+}
+
+func (u Util) MaskUnit(unit types.Unit) error {
+ path := u.JoinPath(SystemdUnitsPath(), string(unit.Name))
+ if err := MkdirForFile(path); err != nil {
+ return err
+ }
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ return os.Symlink("/dev/null", path)
+}
+
+func (u Util) EnableUnit(unit types.Unit) error {
+ path := u.JoinPath(presetPath)
+ if err := MkdirForFile(path); err != nil {
+ return err
+ }
+ file, err := os.OpenFile(path, os.O_RDWR|os.O_APPEND|os.O_CREATE, DefaultPresetPermissions)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ _, err = file.WriteString(fmt.Sprintf("enable %s\n", unit.Name))
+ return err
+}
diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.c b/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.c
new file mode 100644
index 00000000..a63da3f6
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.c
@@ -0,0 +1,139 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#define _GNU_SOURCE
+#include <errno.h>
+#include <pwd.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "user_lookup.h"
+
+#define STACK_SIZE (64 * 1024)
+
+/* This is all a bit copy-and-pasty from update-ssh-keys/authorized_keys_d,
+ * TODO(vc): refactor authorized_keys_d a bit so external packages can reuse
+ * the pieces duplicated here.
+ */
+typedef struct user_lookup_ctxt {
+ void *stack;
+
+ const char *name;
+ const char *root;
+
+ user_lookup_res_t *res;
+ int ret;
+ int err;
+} user_lookup_ctxt_t;
+
+
+static int user_lookup_fn(user_lookup_ctxt_t *ctxt) {
+ char buf[16 * 1024];
+ struct passwd p, *pptr;
+
+ if(chroot(ctxt->root) == -1) {
+ goto out_err;
+ }
+
+ if(getpwnam_r(ctxt->name, &p, buf, sizeof(buf), &pptr) != 0 || !pptr) {
+ goto out_err;
+ }
+
+ if(!(ctxt->res->name = strdup(p.pw_name))) {
+ goto out_err;
+ }
+
+ if(!(ctxt->res->home = strdup(p.pw_dir))) {
+ free(ctxt->res->name);
+ goto out_err;
+ }
+
+ ctxt->res->uid = p.pw_uid;
+ ctxt->res->gid = p.pw_gid;
+
+ return 0;
+
+out_err:
+ ctxt->err = errno;
+ ctxt->ret = -1;
+ return 0;
+}
+
+/* user_lookup() looks up a user in a chroot.
+ * returns 0 and the results in res on success,
+ * res->name will be NULL if user doesn't exist.
+ * returns -1 on error.
+ */
+int user_lookup(const char *root, const char *name, user_lookup_res_t *res) {
+ user_lookup_ctxt_t ctxt = {
+ .root = root,
+ .name = name,
+ .res = res,
+ .ret = 0
+ };
+ int pid, ret = 0;
+ sigset_t allsigs, orig;
+
+ if(!(ctxt.stack = malloc(STACK_SIZE))) {
+ ret = -1;
+ goto out;
+ }
+
+ /* It's necessary to block all signals before cloning, so the child
+ * doesn't run any of the Go runtime's signal handlers.
+ */
+ if((ret = sigemptyset(&orig)) == -1 ||
+ (ret = sigfillset(&allsigs)) == -1)
+ goto out_stack;
+
+ if((ret = sigprocmask(SIG_BLOCK, &allsigs, &orig)) == -1)
+ goto out_stack;
+
+ pid = clone((int(*)(void *))user_lookup_fn, ctxt.stack + STACK_SIZE,
+ CLONE_VM, &ctxt);
+
+ ret = sigprocmask(SIG_SETMASK, &orig, NULL);
+
+ if(pid != -1) {
+ if(waitpid(pid, NULL, __WCLONE) == -1 && errno != ECHILD) {
+ ret = -1;
+ goto out_stack;
+ }
+ } else {
+ ret = -1;
+ }
+
+ if(ret != -1) {
+ errno = ctxt.err;
+ ret = ctxt.ret;
+ }
+
+out_stack:
+ free(ctxt.stack);
+
+out:
+ return ret;
+}
+
+/* user_lookup_res_free() frees any memory allocated by a successful user_lookup(). */
+void user_lookup_res_free(user_lookup_res_t *res) {
+ free(res->home);
+ free(res->name);
+}
diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.go b/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.go
new file mode 100644
index 00000000..85d34919
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.go
@@ -0,0 +1,50 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package util
+
+// #include "user_lookup.h"
+import "C"
+
+import (
+ "fmt"
+ "os/user"
+)
+
+// userLookup looks up the user in u.DestDir.
+func (u Util) userLookup(name string) (*user.User, error) {
+ res := &C.user_lookup_res_t{}
+
+ if ret, err := C.user_lookup(C.CString(u.DestDir),
+ C.CString(name), res); ret < 0 {
+ return nil, fmt.Errorf("lookup failed: %v", err)
+ }
+
+ if res.name == nil {
+ return nil, fmt.Errorf("user %q not found", name)
+ }
+
+ usr := &user.User{
+ Name: C.GoString(res.name),
+ Uid: fmt.Sprintf("%d", int(res.uid)),
+ Gid: fmt.Sprintf("%d", int(res.gid)),
+ HomeDir: u.JoinPath(C.GoString(res.home)),
+ }
+
+ C.user_lookup_res_free(res)
+
+ return usr, nil
+}
diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.h b/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.h
new file mode 100644
index 00000000..8178ee41
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/internal/exec/util/user_lookup.h
@@ -0,0 +1,23 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+typedef struct user_lookup_res {
+ int uid;
+ int gid;
+ char *home;
+ char *name;
+} user_lookup_res_t;
+
+int user_lookup(const char *, const char *, user_lookup_res_t *);
+void user_lookup_res_free(user_lookup_res_t *);
diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/util.go b/vendor/github.com/coreos/ignition/internal/exec/util/util.go
new file mode 100644
index 00000000..31fd33e9
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/internal/exec/util/util.go
@@ -0,0 +1,32 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "path/filepath"
+
+ "github.com/coreos/ignition/internal/log"
+)
+
+// Util encapsulates logging and destdir indirection for the util methods.
+type Util struct {
+ DestDir string // directory prefix to use in applying fs paths.
+ *log.Logger
+}
+
+// JoinPath returns a path rooted in DestDir, à la filepath.Join(u.DestDir, path...)
+func (u Util) JoinPath(path ...string) string {
+ return filepath.Join(u.DestDir, filepath.Join(path...))
+}
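
`JoinPath` simply re-roots paths under DestDir; the equivalent stdlib expression:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	destDir := "/sysroot" // hypothetical DestDir
	fmt.Println(filepath.Join(destDir, filepath.Join("etc", "systemd", "system")))
	// Output: /sysroot/etc/systemd/system
}
```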
diff --git a/vendor/github.com/coreos/ignition/internal/exec/util/verification.go b/vendor/github.com/coreos/ignition/internal/exec/util/verification.go
new file mode 100644
index 00000000..77a74603
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/internal/exec/util/verification.go
@@ -0,0 +1,81 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "crypto/sha512"
+ "encoding/hex"
+ "fmt"
+ "hash"
+
+ "github.com/coreos/ignition/config/types"
+)
+
+type ErrHashMismatch struct {
+ Calculated string
+ Expected string
+}
+
+func (e ErrHashMismatch) Error() string {
+ return fmt.Sprintf("hash verification failed (calculated %s but expected %s)",
+ e.Calculated, e.Expected)
+}
+
+func AssertValid(verify types.Verification, data []byte) error {
+ if hash := verify.Hash; hash != nil {
+ hashFunc, hashSum, err := verify.HashParts()
+ if err != nil {
+ return err
+ }
+
+ var sum []byte
+ switch hashFunc {
+ case "sha512":
+ rawSum := sha512.Sum512(data)
+ sum = rawSum[:]
+ default:
+ return types.ErrHashUnrecognized
+ }
+
+ encodedSum := make([]byte, hex.EncodedLen(len(sum)))
+ hex.Encode(encodedSum, sum)
+ if string(encodedSum) != hashSum {
+ return ErrHashMismatch{
+ Calculated: string(encodedSum),
+ Expected: hashSum,
+ }
+ }
+ }
+
+ return nil
+}
+
+func GetHasher(verify types.Verification) (hash.Hash, error) {
+ if verify.Hash == nil {
+ return nil, nil
+ }
+
+ function, _, err := verify.HashParts()
+ if err != nil {
+ return nil, err
+ }
+
+ switch function {
+ case "sha512":
+ return sha512.New(), nil
+ default:
+ return nil, types.ErrHashUnrecognized
+ }
+}
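
`AssertValid` boils down to recomputing the SHA-512 and comparing hex strings; a stdlib-only sketch:

```go
package main

import (
	"crypto/sha512"
	"encoding/hex"
	"fmt"
)

func main() {
	data := []byte("ignition")

	// Build the "<function>-<sum>" spec a config would carry.
	sum := sha512.Sum512(data)
	spec := "sha512-" + hex.EncodeToString(sum[:])

	// Recompute over the fetched data and compare, as AssertValid does.
	check := sha512.Sum512(data)
	fmt.Println(spec == "sha512-"+hex.EncodeToString(check[:])) // true
}
```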
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 00000000..c8364161
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md
new file mode 100644
index 00000000..26243044
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/README.md
@@ -0,0 +1,200 @@
+go-spew
+=======
+
+[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)](https://travis-ci.org/davecgh/go-spew) [![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status](https://img.shields.io/coveralls/davecgh/go-spew.svg)](https://coveralls.io/r/davecgh/go-spew?branch=master)
+
+
+Go-spew implements a deep pretty printer for Go data structures to aid in
+debugging. A comprehensive suite of tests with 100% test coverage is provided
+to ensure proper functionality. See `test_coverage.txt` for the gocov coverage
+report. Go-spew is licensed under the liberal ISC license, so it may be used in
+open source or commercial projects.
+
+If you're interested in reading about how this package came to life and some
+of the challenges involved in providing a deep pretty printer, there is a blog
+post about it
+[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
+
+## Documentation
+
+[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/davecgh/go-spew/spew)
+
+Full `go doc` style documentation for the project can be viewed online without
+installing this package by using the excellent GoDoc site here:
+http://godoc.org/github.com/davecgh/go-spew/spew
+
+You can also view the documentation locally once the package is installed with
+the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
+http://localhost:6060/pkg/github.com/davecgh/go-spew/spew
+
+## Installation
+
+```bash
+$ go get -u github.com/davecgh/go-spew/spew
+```
+
+## Quick Start
+
+Add this import line to the file you're working in:
+
+```Go
+import "github.com/davecgh/go-spew/spew"
+```
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+
+```Go
+spew.Dump(myVar1, myVar2, ...)
+spew.Fdump(someWriter, myVar1, myVar2, ...)
+str := spew.Sdump(myVar1, myVar2, ...)
+```
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
+compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
+and pointer addresses):
+
+```Go
+spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+```
+
+## Debugging a Web Application Example
+
+Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.
+
+```Go
+package main
+
+import (
+ "fmt"
+ "html"
+ "net/http"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/html")
+ fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
+ fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
+}
+
+func main() {
+ http.HandleFunc("/", handler)
+ http.ListenAndServe(":8080", nil)
+}
+```
+
+## Sample Dump Output
+
+```
+(main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) {
+ (string) "one": (bool) true
+ }
+}
+([]uint8) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+}
+```
+
+## Sample Formatter Output
+
+Double pointer to a uint8:
+```
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+```
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+```
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+```
+
+## Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available via the
+spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+```
+* Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+* MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+* DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+* DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables. This option
+ relies on access to the unsafe package, so it will not have any effect when
+ running in environments without access to the unsafe package such as Google
+ App Engine or with the "safe" build tag specified.
+ Pointer method invocation is enabled by default.
+
+* DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+* DisableCapacities
+ DisableCapacities specifies whether to disable the printing of capacities
+ for arrays, slices, maps and channels. This is useful when diffing data
+ structures in tests.
+
+* ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+* SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are supported,
+ with other types sorted according to the reflect.Value.String() output
+ which guarantees display stability. Natural map order is used by
+ default.
+
+* SpewKeys
+ SpewKeys specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only considered
+ if SortKeys is true.
+
+```
+
+## Unsafe Package Dependency
+
+This package relies on the unsafe package to perform some of the more advanced
+features, however it also supports a "limited" mode which allows it to work in
+environments where the unsafe package is not available. By default, it will
+operate in this mode on Google App Engine and when compiled with GopherJS. The
+"safe" build tag may also be specified to force the package to build without
+using the unsafe package.
+
+## License
+
+Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 00000000..8a4a6589
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build !js,!appengine,!safe,!disableunsafe
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+var (
+ // offsetPtr, offsetScalar, and offsetFlag are the offsets for the
+ // internal reflect.Value fields. These values are valid before golang
+	// commit ecccf07e7f9d which changed the format. They are also valid
+ // after commit 82f48826c6c7 which changed the format again to mirror
+ // the original format. Code in the init function updates these offsets
+ // as necessary.
+ offsetPtr = uintptr(ptrSize)
+ offsetScalar = uintptr(0)
+ offsetFlag = uintptr(ptrSize * 2)
+
+ // flagKindWidth and flagKindShift indicate various bits that the
+ // reflect package uses internally to track kind information.
+ //
+ // flagRO indicates whether or not the value field of a reflect.Value is
+ // read-only.
+ //
+ // flagIndir indicates whether the value field of a reflect.Value is
+ // the actual data or a pointer to the data.
+ //
+ // These values are valid before golang commit 90a7c3c86944 which
+ // changed their positions. Code in the init function updates these
+ // flags as necessary.
+ flagKindWidth = uintptr(5)
+ flagKindShift = uintptr(flagKindWidth - 1)
+ flagRO = uintptr(1 << 0)
+ flagIndir = uintptr(1 << 1)
+)
+
+func init() {
+ // Older versions of reflect.Value stored small integers directly in the
+ // ptr field (which is named val in the older versions). Versions
+ // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
+ // scalar for this purpose which unfortunately came before the flag
+ // field, so the offset of the flag field is different for those
+ // versions.
+ //
+ // This code constructs a new reflect.Value from a known small integer
+ // and checks if the size of the reflect.Value struct indicates it has
+ // the scalar field. When it does, the offsets are updated accordingly.
+ vv := reflect.ValueOf(0xf00)
+ if unsafe.Sizeof(vv) == (ptrSize * 4) {
+ offsetScalar = ptrSize * 2
+ offsetFlag = ptrSize * 3
+ }
+
+ // Commit 90a7c3c86944 changed the flag positions such that the low
+ // order bits are the kind. This code extracts the kind from the flags
+ // field and ensures it's the correct type. When it's not, the flag
+ // order has been changed to the newer format, so the flags are updated
+ // accordingly.
+ upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
+ upfv := *(*uintptr)(upf)
+ flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
+ if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
+ flagKindShift = 0
+ flagRO = 1 << 5
+ flagIndir = 1 << 6
+
+ // Commit adf9b30e5594 modified the flags to separate the
+ // flagRO flag into two bits which specifies whether or not the
+ // field is embedded. This causes flagIndir to move over a bit
+ // and means that flagRO is the combination of either of the
+ // original flagRO bit and the new bit.
+ //
+ // This code detects the change by extracting what used to be
+ // the indirect bit to ensure it's set. When it's not, the flag
+ // order has been changed to the newer format, so the flags are
+ // updated accordingly.
+ if upfv&flagIndir == 0 {
+ flagRO = 3 << 5
+ flagIndir = 1 << 7
+ }
+ }
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check whether values implement the Stringer and error
+// interfaces, so they can be used for pretty printing ordinarily
+// unaddressable and inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
+ indirects := 1
+ vt := v.Type()
+ upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
+ rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
+ if rvf&flagIndir != 0 {
+ vt = reflect.PtrTo(v.Type())
+ indirects++
+ } else if offsetScalar != 0 {
+ // The value is in the scalar field when it's not one of the
+ // reference types.
+ switch vt.Kind() {
+ case reflect.Uintptr:
+ case reflect.Chan:
+ case reflect.Func:
+ case reflect.Map:
+ case reflect.Ptr:
+ case reflect.UnsafePointer:
+ default:
+ upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
+ offsetScalar)
+ }
+ }
+
+ pv := reflect.NewAt(vt, upv)
+ rv = pv
+ for i := 0; i < indirects; i++ {
+ rv = rv.Elem()
+ }
+ return rv
+}
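+
+// A minimal usage sketch (hypothetical names, for illustration only):
+//
+//	type wrapped struct{ secret string }
+//	v := reflect.ValueOf(wrapped{secret: "hidden"}).Field(0)
+//	// v.Interface() would panic on the unexported field; the bypassed
+//	// value can be interfaced and printed normally.
+//	fmt.Println(unsafeReflectValue(v).Interface()) // hidden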
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 00000000..1fe3cf3d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 00000000..7c519ff4
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+ panicBytes = []byte("(PANIC=")
+ plusBytes = []byte("+")
+ iBytes = []byte("i")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ interfaceBytes = []byte("(interface {})")
+ commaNewlineBytes = []byte(",\n")
+ newlineBytes = []byte("\n")
+ openBraceBytes = []byte("{")
+ openBraceNewlineBytes = []byte("{\n")
+ closeBraceBytes = []byte("}")
+ asteriskBytes = []byte("*")
+ colonBytes = []byte(":")
+ colonSpaceBytes = []byte(": ")
+ openParenBytes = []byte("(")
+ closeParenBytes = []byte(")")
+ spaceBytes = []byte(" ")
+ pointerChainBytes = []byte("->")
+ nilAngleBytes = []byte("<nil>")
+ maxNewlineBytes = []byte("<max depth reached>\n")
+ maxShortBytes = []byte("<max>")
+ circularBytes = []byte("<already shown>")
+ circularShortBytes = []byte("<shown>")
+ invalidAngleBytes = []byte("<invalid>")
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ percentBytes = []byte("%")
+ precisionBytes = []byte(".")
+ openAngleBytes = []byte("<")
+ closeAngleBytes = []byte(">")
+ openMapBytes = []byte("map[")
+ closeMapBytes = []byte("]")
+ lenEqualsBytes = []byte("len=")
+ capEqualsBytes = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
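+
+// For example: if a custom String method panics with "boom", the output
+// produced via catchPanic contains "(PANIC=boom)" in place of the value.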
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+ // We need an interface to check if the type implements the error or
+ // Stringer interface. However, the reflect package won't give us an
+ // interface on certain things like unexported struct fields in order
+ // to enforce visibility rules. We use unsafe, when it's available,
+ // to bypass these restrictions since this package does not mutate the
+ // values.
+ if !v.CanInterface() {
+ if UnsafeDisabled {
+ return false
+ }
+
+ v = unsafeReflectValue(v)
+ }
+
+ // Choose whether or not to do error and Stringer interface lookups against
+ // the base type or a pointer to the base type depending on settings.
+ // Technically calling one of these methods with a pointer receiver can
+// mutate the value, however, types which choose to satisfy an error or
+ // Stringer interface with a pointer receiver should not be mutating their
+ // state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+ v = unsafeReflectValue(v)
+ }
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+
+ // Is it an error or Stringer?
+ switch iface := v.Interface().(type) {
+ case error:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.Error()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+
+ w.Write([]byte(iface.Error()))
+ return true
+
+ case fmt.Stringer:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.String()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+ w.Write([]byte(iface.String()))
+ return true
+ }
+ return false
+}
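+
+// A hypothetical sketch of the pointer-receiver case handled above: a
+// Stringer defined only on *id is still invoked for a plain id value when
+// DisablePointerMethods is false and unsafe is available.
+//
+//	type id int
+//	func (i *id) String() string { return "id-" + strconv.Itoa(int(*i)) }
+//	// handleMethods(cs, w, reflect.ValueOf(id(7))) writes "id-7" to w.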
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+ // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
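+
+// For instance, printHexPtr(w, 0x1f4) writes "0x1f4" to w, while
+// printHexPtr(w, 0) writes "<nil>".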
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+ values []reflect.Value
+ strings []string // either nil or same len as values
+ cs *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
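+
+// A usage sketch (illustrative only): sorting map keys before printing them
+// makes dump output deterministic.
+//
+//	m := map[string]int{"b": 2, "a": 1}
+//	keys := reflect.ValueOf(m).MapKeys()
+//	sortValues(keys, &Config) // keys now iterate as "a", "b"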
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 00000000..2e3d22f3
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+ // global config instance that all top-level functions use set this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+ // with a pointer receiver could technically mutate the value, however,
+ // in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "safe" build tag specified.
+ DisablePointerMethods bool
+
+ // DisablePointerAddresses specifies whether to disable the printing of
+ // pointer addresses. This is useful when diffing data structures in tests.
+ DisablePointerAddresses bool
+
+ // DisableCapacities specifies whether to disable the printing of capacities
+ // for arrays, slices, maps and channels. This is useful when diffing
+ // data structures in tests.
+ DisableCapacities bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+ // SortKeys specifies map keys should be sorted before being printed. Use
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Sprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
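+
+// A usage sketch: a dedicated ConfigState keeps settings such as SortKeys
+// local instead of mutating the global spew.Config.
+//
+//	cs := spew.NewDefaultConfig()
+//	cs.SortKeys = true
+//	cs.Dump(map[string]int{"b": 2, "a": 1}) // keys print in sorted order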
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 00000000..aacaac6f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows multiple configurations
+to be used concurrently. See the ConfigState documentation for more details,
+and the sketch after the options list below for an example.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels. This is useful when
+ diffing data structures in tests.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
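+For example (a minimal sketch), a dedicated ConfigState applies these options
+without touching the global configuration:
+
+ cs := spew.ConfigState{Indent: "\t", SortKeys: true}
+ cs.Dump(myVar1)
+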
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed in the style of the
+hexdump -C command, as shown below.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 00000000..df1d582a
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from the map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
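+
+// For example: a []byte or [3]byte value is hexdumped directly, while a cgo
+// [3]_Ctype_char array matches cCharRE and is first converted element by
+// element into a []uint8 before being handed to hex.Dump.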
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if !d.cs.DisableCapacities && valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+ // is enabled
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 00000000..c49875ba
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
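+
+// An illustrative sketch: for a call like
+// fmt.Fprintf(w, "%6.2x", spew.NewFormatter(v)), constructOrigFormat('x')
+// rebuilds "%6.2x" so the unsupported verb can be handed back to the
+// standard fmt package.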
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from the map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
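
A usage sketch (editorial, not part of the vendored diff): wrapping values with spew.NewFormatter lets the standard fmt verbs described above drive the custom formatting.

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type server struct {
	Name  string
	Retry *int
}

func main() {
	n := 3
	s := server{Name: "vm0", Retry: &n}

	// %v is the most compact form; %+v adds pointer addresses,
	// %#v adds types, %#+v adds both.
	fmt.Printf("%v\n", spew.NewFormatter(s))
	fmt.Printf("%#+v\n", spew.NewFormatter(s))
}
```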
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 00000000..32c0e338
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
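
A minimal sketch of the convenience wrappers above in use; each argument flows through convertArgs and therefore prints with the spew Formatter semantics:

```go
package main

import "github.com/davecgh/go-spew/spew"

type state struct{ Phase string }

func main() {
	s := state{Phase: "running"}
	spew.Printf("state: %+v\n", s) // each arg wrapped via convertArgs
	msg := spew.Sprintf("%#v", s)  // same semantics, but returns a string
	spew.Println(msg)
}
```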
diff --git a/vendor/github.com/dmacvicar/libvirt-go/LICENSE b/vendor/github.com/dmacvicar/libvirt-go/LICENSE
new file mode 100644
index 00000000..202f5fce
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/LICENSE
@@ -0,0 +1,21 @@
+The MIT License
+
+Copyright (c) 2013 Alex Zorin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE. \ No newline at end of file
diff --git a/vendor/github.com/dmacvicar/libvirt-go/README.md b/vendor/github.com/dmacvicar/libvirt-go/README.md
new file mode 100644
index 00000000..814bd673
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/README.md
@@ -0,0 +1,49 @@
+# libvirt-go
+
+[![Build Status](http://ci.serversaurus.com/github.com/alexzorin/libvirt-go/status.svg?branch=master)](http://ci.serversaurus.com/github.com/alexzorin/libvirt-go)
+
+Go bindings for libvirt.
+
+Make sure you have the `libvirt-dev` package installed (or the libvirt development files otherwise somewhere in your include path).
+
+## Version Support
+Currently, the only supported version of libvirt is **1.2.2**, tagged as `v2.x` releases at `gopkg.in/alexzorin/libvirt-go.v2` [(docs)](http://gopkg.in/alexzorin/libvirt-go.v2).
+
+The bindings will probably work with versions of libvirt that are higher than 1.2.2, depending on what is added in those releases. However, no features are currently being added that will cause the build or tests to break against 1.2.2.
+
+### OS Compatibility Matrix
+
+A quick reference for the libvirt version each OS can easily support (this table may be outdated). Obviously, nothing below 1.2.2 is usable with these bindings.
+
+| OS Release | libvirt Version |
+| ------------ | ------------------------------ |
+| FC19 | 1.2.9 from libvirt.org/sources |
+| Debian 7 | 1.2.4 from wheezy-backports |
+| Debian 6 | 0.9.12 from squeeze-backports |
+| Ubuntu 14.04 | 1.2.2 from trusty |
+| RHEL 6 | 0.10.x |
+| RHEL 5 | 0.8.x |
+
+
+### 0.9.x Support
+
+Previously there was support for libvirt 0.9.8 and below; however, this is no longer being updated. These releases were tagged `v1.x` at `gopkg.in/alexzorin/libvirt-go.v1` [(docs)](http://gopkg.in/alexzorin/libvirt-go.v1).
+
+## Documentation
+
+* [api documentation for the bindings](http://godoc.org/github.com/rgbkrk/libvirt-go)
+* [api documentation for libvirt](http://libvirt.org/html/libvirt-libvirt.html)
+
+## Contributing
+
+Please fork and write tests.
+
+Integration tests are available where functionality isn't provided by the test driver; see `integration_test.go`.
+
+A `Vagrantfile` is included to run the integration tests:
+
+* `cd ./vagrant/{branch}` (e.g. `./vagrant/master`, where you will find a `Vagrantfile` for the `master` branch)
+* `vagrant up` to provision the virtual machine
+* `vagrant ssh` to login to the virtual machine
+
+Once inside, `sudo su -`, `cd /libvirt-go` and `go test -tags integration`.
diff --git a/vendor/github.com/dmacvicar/libvirt-go/cfuncs.go b/vendor/github.com/dmacvicar/libvirt-go/cfuncs.go
new file mode 100644
index 00000000..b099026e
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/cfuncs.go
@@ -0,0 +1,109 @@
+package libvirt
+
+/*
+ * Go 1.6 doesn't support passing pointers to Go memory into C.
+ * A hacky solution might be some multi-threaded approach to support domain
+ * events, but let's make it work without domain events for now.
+ */
+
+/*
+#cgo LDFLAGS: -lvirt
+#include <libvirt/libvirt.h>
+#include <libvirt/virterror.h>
+#include <stdlib.h>
+
+int domainEventLifecycleCallback_cgo(virConnectPtr c, virDomainPtr d,
+ int event, int detail, void *data)
+{
+ return domainEventLifecycleCallback(c, d, event, detail, data);
+}
+
+int domainEventGenericCallback_cgo(virConnectPtr c, virDomainPtr d, void *data)
+{
+ return domainEventGenericCallback(c, d, data);
+}
+
+int domainEventRTCChangeCallback_cgo(virConnectPtr c, virDomainPtr d,
+ long long utcoffset, void *data)
+{
+ return domainEventRTCChangeCallback(c, d, utcoffset, data);
+}
+
+int domainEventWatchdogCallback_cgo(virConnectPtr c, virDomainPtr d,
+ int action, void *data)
+{
+ return domainEventWatchdogCallback(c, d, action, data);
+}
+
+int domainEventIOErrorCallback_cgo(virConnectPtr c, virDomainPtr d,
+ const char *srcPath, const char *devAlias,
+ int action, void *data)
+{
+ return domainEventIOErrorCallback(c, d, srcPath, devAlias, action, data);
+}
+
+int domainEventGraphicsCallback_cgo(virConnectPtr c, virDomainPtr d,
+ int phase, const virDomainEventGraphicsAddress *local,
+ const virDomainEventGraphicsAddress *remote,
+ const char *authScheme,
+ const virDomainEventGraphicsSubject *subject, void *data)
+{
+ return domainEventGraphicsCallback(c, d, phase, local, remote, authScheme, subject, data);
+}
+
+int domainEventIOErrorReasonCallback_cgo(virConnectPtr c, virDomainPtr d,
+ const char *srcPath, const char *devAlias,
+ int action, const char *reason, void *data)
+{
+ return domainEventIOErrorReasonCallback(c, d, srcPath, devAlias, action, reason, data);
+}
+
+int domainEventBlockJobCallback_cgo(virConnectPtr c, virDomainPtr d,
+ const char *disk, int type, int status, void *data)
+{
+	return domainEventBlockJobCallback(c, d, disk, type, status, data);
+}
+
+int domainEventDiskChangeCallback_cgo(virConnectPtr c, virDomainPtr d,
+ const char *oldSrcPath, const char *newSrcPath,
+ const char *devAlias, int reason, void *data)
+{
+ return domainEventDiskChangeCallback(c, d, oldSrcPath, newSrcPath, devAlias, reason, data);
+}
+
+int domainEventTrayChangeCallback_cgo(virConnectPtr c, virDomainPtr d,
+ const char *devAlias, int reason, void *data)
+{
+ return domainEventTrayChangeCallback(c, d, devAlias, reason, data);
+}
+
+int domainEventReasonCallback_cgo(virConnectPtr c, virDomainPtr d,
+ int reason, void *data)
+{
+ return domainEventReasonCallback(c, d, reason, data);
+}
+
+int domainEventBalloonChangeCallback_cgo(virConnectPtr c, virDomainPtr d,
+ unsigned long long actual, void *data)
+{
+ return domainEventBalloonChangeCallback(c, d, actual, data);
+}
+
+int domainEventDeviceRemovedCallback_cgo(virConnectPtr c, virDomainPtr d,
+ const char *devAlias, void *data)
+{
+ return domainEventDeviceRemovedCallback(c, d, devAlias, data);
+}
+
+void freeGoCallback_cgo(void* goCallbackId) {
+ freeCallbackId((size_t)goCallbackId);
+}
+
+int virConnectDomainEventRegisterAny_cgo(virConnectPtr c, virDomainPtr d,
+ int eventID, virConnectDomainEventGenericCallback cb,
+ int goCallbackId) {
+ void* id = (void*)0 + goCallbackId; // Hack to silence the warning
+ return virConnectDomainEventRegisterAny(c, d, eventID, cb, id, freeGoCallback_cgo);
+}
+*/
+import "C"
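
The goCallbackId indirection above exists because cgo forbids handing pointers to Go memory to C. A minimal sketch of the integer-ID registry pattern this implies on the Go side (the names here are illustrative assumptions, not this package's actual internals):

```go
package main

import (
	"fmt"
	"sync"
)

// Callbacks are stored behind integer IDs so that only a plain
// integer, never a Go pointer, crosses the cgo boundary.
var (
	mu        sync.Mutex
	nextID    int
	callbacks = map[int]func(event int){}
)

// registerCallback stores cb and returns the ID to thread through C.
func registerCallback(cb func(event int)) int {
	mu.Lock()
	defer mu.Unlock()
	nextID++
	callbacks[nextID] = cb
	return nextID
}

// dispatch is what an exported cgo shim would call with the ID it
// received back from C as void*.
func dispatch(id, event int) {
	mu.Lock()
	cb := callbacks[id]
	mu.Unlock()
	if cb != nil {
		cb(event)
	}
}

func main() {
	id := registerCallback(func(event int) { fmt.Println("event:", event) })
	dispatch(id, 1) // in the real bindings this call originates in C
}
```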
diff --git a/vendor/github.com/dmacvicar/libvirt-go/constants.go b/vendor/github.com/dmacvicar/libvirt-go/constants.go
new file mode 100644
index 00000000..dce5cba8
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/constants.go
@@ -0,0 +1,473 @@
+package libvirt
+
+/*
+#cgo LDFLAGS: -lvirt
+#include <libvirt/libvirt.h>
+#include <libvirt/virterror.h>
+#include <stdlib.h>
+*/
+import "C"
+
+// virDomainState
+const (
+ VIR_DOMAIN_NOSTATE = C.VIR_DOMAIN_NOSTATE
+ VIR_DOMAIN_RUNNING = C.VIR_DOMAIN_RUNNING
+ VIR_DOMAIN_BLOCKED = C.VIR_DOMAIN_BLOCKED
+ VIR_DOMAIN_PAUSED = C.VIR_DOMAIN_PAUSED
+ VIR_DOMAIN_SHUTDOWN = C.VIR_DOMAIN_SHUTDOWN
+ VIR_DOMAIN_CRASHED = C.VIR_DOMAIN_CRASHED
+ VIR_DOMAIN_PMSUSPENDED = C.VIR_DOMAIN_PMSUSPENDED
+ VIR_DOMAIN_SHUTOFF = C.VIR_DOMAIN_SHUTOFF
+)
+
+// virConnectListAllDomainsFlags
+const (
+ VIR_CONNECT_LIST_DOMAINS_ACTIVE = C.VIR_CONNECT_LIST_DOMAINS_ACTIVE
+ VIR_CONNECT_LIST_DOMAINS_INACTIVE = C.VIR_CONNECT_LIST_DOMAINS_INACTIVE
+ VIR_CONNECT_LIST_DOMAINS_PERSISTENT = C.VIR_CONNECT_LIST_DOMAINS_PERSISTENT
+ VIR_CONNECT_LIST_DOMAINS_TRANSIENT = C.VIR_CONNECT_LIST_DOMAINS_TRANSIENT
+ VIR_CONNECT_LIST_DOMAINS_RUNNING = C.VIR_CONNECT_LIST_DOMAINS_RUNNING
+ VIR_CONNECT_LIST_DOMAINS_PAUSED = C.VIR_CONNECT_LIST_DOMAINS_PAUSED
+ VIR_CONNECT_LIST_DOMAINS_SHUTOFF = C.VIR_CONNECT_LIST_DOMAINS_SHUTOFF
+ VIR_CONNECT_LIST_DOMAINS_OTHER = C.VIR_CONNECT_LIST_DOMAINS_OTHER
+ VIR_CONNECT_LIST_DOMAINS_MANAGEDSAVE = C.VIR_CONNECT_LIST_DOMAINS_MANAGEDSAVE
+ VIR_CONNECT_LIST_DOMAINS_NO_MANAGEDSAVE = C.VIR_CONNECT_LIST_DOMAINS_NO_MANAGEDSAVE
+ VIR_CONNECT_LIST_DOMAINS_AUTOSTART = C.VIR_CONNECT_LIST_DOMAINS_AUTOSTART
+ VIR_CONNECT_LIST_DOMAINS_NO_AUTOSTART = C.VIR_CONNECT_LIST_DOMAINS_NO_AUTOSTART
+ VIR_CONNECT_LIST_DOMAINS_HAS_SNAPSHOT = C.VIR_CONNECT_LIST_DOMAINS_HAS_SNAPSHOT
+ VIR_CONNECT_LIST_DOMAINS_NO_SNAPSHOT = C.VIR_CONNECT_LIST_DOMAINS_NO_SNAPSHOT
+)
+
+// virDomainMetadataType
+const (
+ VIR_DOMAIN_METADATA_DESCRIPTION = C.VIR_DOMAIN_METADATA_DESCRIPTION
+ VIR_DOMAIN_METADATA_TITLE = C.VIR_DOMAIN_METADATA_TITLE
+ VIR_DOMAIN_METADATA_ELEMENT = C.VIR_DOMAIN_METADATA_ELEMENT
+)
+
+// virDomainVcpuFlags
+const (
+ VIR_DOMAIN_VCPU_CONFIG = C.VIR_DOMAIN_VCPU_CONFIG
+ VIR_DOMAIN_VCPU_CURRENT = C.VIR_DOMAIN_VCPU_CURRENT
+ VIR_DOMAIN_VCPU_LIVE = C.VIR_DOMAIN_VCPU_LIVE
+ VIR_DOMAIN_VCPU_MAXIMUM = C.VIR_DOMAIN_VCPU_MAXIMUM
+ VIR_DOMAIN_VCPU_GUEST = C.VIR_DOMAIN_VCPU_GUEST
+)
+
+// virDomainModificationImpact
+const (
+ VIR_DOMAIN_AFFECT_CONFIG = C.VIR_DOMAIN_AFFECT_CONFIG
+ VIR_DOMAIN_AFFECT_CURRENT = C.VIR_DOMAIN_AFFECT_CURRENT
+ VIR_DOMAIN_AFFECT_LIVE = C.VIR_DOMAIN_AFFECT_LIVE
+)
+
+// virDomainMemoryModFlags
+const (
+ VIR_DOMAIN_MEM_CONFIG = C.VIR_DOMAIN_AFFECT_CONFIG
+ VIR_DOMAIN_MEM_CURRENT = C.VIR_DOMAIN_AFFECT_CURRENT
+ VIR_DOMAIN_MEM_LIVE = C.VIR_DOMAIN_AFFECT_LIVE
+ VIR_DOMAIN_MEM_MAXIMUM = C.VIR_DOMAIN_MEM_MAXIMUM
+)
+
+// virStoragePoolState
+const (
+	VIR_STORAGE_POOL_INACTIVE     = C.VIR_STORAGE_POOL_INACTIVE     // Not running
+	VIR_STORAGE_POOL_BUILDING     = C.VIR_STORAGE_POOL_BUILDING     // Initializing pool, not available
+	VIR_STORAGE_POOL_RUNNING      = C.VIR_STORAGE_POOL_RUNNING      // Running normally
+	VIR_STORAGE_POOL_DEGRADED     = C.VIR_STORAGE_POOL_DEGRADED     // Running degraded
+	VIR_STORAGE_POOL_INACCESSIBLE = C.VIR_STORAGE_POOL_INACCESSIBLE // Running, but not accessible
+)
+
+// virStoragePoolBuildFlags
+const (
+ VIR_STORAGE_POOL_BUILD_NEW = C.VIR_STORAGE_POOL_BUILD_NEW // Regular build from scratch
+ VIR_STORAGE_POOL_BUILD_REPAIR = C.VIR_STORAGE_POOL_BUILD_REPAIR // Repair / reinitialize
+ VIR_STORAGE_POOL_BUILD_RESIZE = C.VIR_STORAGE_POOL_BUILD_RESIZE // Extend existing pool
+ VIR_STORAGE_POOL_BUILD_NO_OVERWRITE = C.VIR_STORAGE_POOL_BUILD_NO_OVERWRITE // Do not overwrite existing pool
+ VIR_STORAGE_POOL_BUILD_OVERWRITE = C.VIR_STORAGE_POOL_BUILD_OVERWRITE // Overwrite data
+)
+
+// virDomainDestroyFlags
+const (
+ VIR_DOMAIN_DESTROY_DEFAULT = C.VIR_DOMAIN_DESTROY_DEFAULT
+ VIR_DOMAIN_DESTROY_GRACEFUL = C.VIR_DOMAIN_DESTROY_GRACEFUL
+)
+
+// virDomainShutdownFlags
+const (
+ VIR_DOMAIN_SHUTDOWN_DEFAULT = C.VIR_DOMAIN_SHUTDOWN_DEFAULT
+ VIR_DOMAIN_SHUTDOWN_ACPI_POWER_BTN = C.VIR_DOMAIN_SHUTDOWN_ACPI_POWER_BTN
+ VIR_DOMAIN_SHUTDOWN_GUEST_AGENT = C.VIR_DOMAIN_SHUTDOWN_GUEST_AGENT
+ VIR_DOMAIN_SHUTDOWN_INITCTL = C.VIR_DOMAIN_SHUTDOWN_INITCTL
+ VIR_DOMAIN_SHUTDOWN_SIGNAL = C.VIR_DOMAIN_SHUTDOWN_SIGNAL
+)
+
+// virDomainUndefineFlags
+const (
+ VIR_DOMAIN_UNDEFINE_DEFAULT = 0
+ VIR_DOMAIN_UNDEFINE_MANAGED_SAVE = C.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE
+ VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA = C.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA
+ VIR_DOMAIN_UNDEFINE_NVRAM = C.VIR_DOMAIN_UNDEFINE_NVRAM
+)
+
+// virDomainAttachDeviceFlags
+const (
+ VIR_DOMAIN_DEVICE_MODIFY_CONFIG = C.VIR_DOMAIN_AFFECT_CONFIG
+ VIR_DOMAIN_DEVICE_MODIFY_CURRENT = C.VIR_DOMAIN_AFFECT_CURRENT
+ VIR_DOMAIN_DEVICE_MODIFY_LIVE = C.VIR_DOMAIN_AFFECT_LIVE
+ VIR_DOMAIN_DEVICE_MODIFY_FORCE = C.VIR_DOMAIN_DEVICE_MODIFY_FORCE
+)
+
+// virStorageVolCreateFlags
+const (
+ VIR_STORAGE_VOL_CREATE_PREALLOC_METADATA = C.VIR_STORAGE_VOL_CREATE_PREALLOC_METADATA
+)
+
+// virStorageVolDeleteFlags
+const (
+ VIR_STORAGE_VOL_DELETE_NORMAL = C.VIR_STORAGE_VOL_DELETE_NORMAL // Delete metadata only (fast)
+ VIR_STORAGE_VOL_DELETE_ZEROED = C.VIR_STORAGE_VOL_DELETE_ZEROED // Clear all data to zeros (slow)
+)
+
+// virStorageVolResizeFlags
+const (
+ VIR_STORAGE_VOL_RESIZE_ALLOCATE = C.VIR_STORAGE_VOL_RESIZE_ALLOCATE // force allocation of new size
+ VIR_STORAGE_VOL_RESIZE_DELTA = C.VIR_STORAGE_VOL_RESIZE_DELTA // size is relative to current
+ VIR_STORAGE_VOL_RESIZE_SHRINK = C.VIR_STORAGE_VOL_RESIZE_SHRINK // allow decrease in capacity
+)
+
+// virStorageVolType
+const (
+ VIR_STORAGE_VOL_FILE = C.VIR_STORAGE_VOL_FILE // Regular file based volumes
+ VIR_STORAGE_VOL_BLOCK = C.VIR_STORAGE_VOL_BLOCK // Block based volumes
+ VIR_STORAGE_VOL_DIR = C.VIR_STORAGE_VOL_DIR // Directory-passthrough based volume
+	VIR_STORAGE_VOL_NETWORK = C.VIR_STORAGE_VOL_NETWORK // Network volumes like RBD (RADOS Block Device)
+ VIR_STORAGE_VOL_NETDIR = C.VIR_STORAGE_VOL_NETDIR // Network accessible directory that can contain other network volumes
+)
+
+// virStorageVolWipeAlgorithm
+const (
+ VIR_STORAGE_VOL_WIPE_ALG_ZERO = C.VIR_STORAGE_VOL_WIPE_ALG_ZERO // 1-pass, all zeroes
+ VIR_STORAGE_VOL_WIPE_ALG_NNSA = C.VIR_STORAGE_VOL_WIPE_ALG_NNSA // 4-pass NNSA Policy Letter NAP-14.1-C (XVI-8)
+ VIR_STORAGE_VOL_WIPE_ALG_DOD = C.VIR_STORAGE_VOL_WIPE_ALG_DOD // 4-pass DoD 5220.22-M section 8-306 procedure
+ VIR_STORAGE_VOL_WIPE_ALG_BSI = C.VIR_STORAGE_VOL_WIPE_ALG_BSI // 9-pass method recommended by the German Center of Security in Information Technologies
+ VIR_STORAGE_VOL_WIPE_ALG_GUTMANN = C.VIR_STORAGE_VOL_WIPE_ALG_GUTMANN // The canonical 35-pass sequence
+ VIR_STORAGE_VOL_WIPE_ALG_SCHNEIER = C.VIR_STORAGE_VOL_WIPE_ALG_SCHNEIER // 7-pass method described by Bruce Schneier in "Applied Cryptography" (1996)
+ VIR_STORAGE_VOL_WIPE_ALG_PFITZNER7 = C.VIR_STORAGE_VOL_WIPE_ALG_PFITZNER7 // 7-pass random
+ VIR_STORAGE_VOL_WIPE_ALG_PFITZNER33 = C.VIR_STORAGE_VOL_WIPE_ALG_PFITZNER33 // 33-pass random
+ VIR_STORAGE_VOL_WIPE_ALG_RANDOM = C.VIR_STORAGE_VOL_WIPE_ALG_RANDOM // 1-pass random
+)
+
+// virSecretUsageType
+const (
+ VIR_SECRET_USAGE_TYPE_NONE = C.VIR_SECRET_USAGE_TYPE_NONE
+ VIR_SECRET_USAGE_TYPE_VOLUME = C.VIR_SECRET_USAGE_TYPE_VOLUME
+ VIR_SECRET_USAGE_TYPE_CEPH = C.VIR_SECRET_USAGE_TYPE_CEPH
+ VIR_SECRET_USAGE_TYPE_ISCSI = C.VIR_SECRET_USAGE_TYPE_ISCSI
+)
+
+// virConnectListAllNetworksFlags
+const (
+ VIR_CONNECT_LIST_NETWORKS_INACTIVE = C.VIR_CONNECT_LIST_NETWORKS_INACTIVE
+ VIR_CONNECT_LIST_NETWORKS_ACTIVE = C.VIR_CONNECT_LIST_NETWORKS_ACTIVE
+ VIR_CONNECT_LIST_NETWORKS_PERSISTENT = C.VIR_CONNECT_LIST_NETWORKS_PERSISTENT
+ VIR_CONNECT_LIST_NETWORKS_TRANSIENT = C.VIR_CONNECT_LIST_NETWORKS_TRANSIENT
+ VIR_CONNECT_LIST_NETWORKS_AUTOSTART = C.VIR_CONNECT_LIST_NETWORKS_AUTOSTART
+ VIR_CONNECT_LIST_NETWORKS_NO_AUTOSTART = C.VIR_CONNECT_LIST_NETWORKS_NO_AUTOSTART
+)
+
+// virConnectListAllStoragePoolsFlags
+const (
+ VIR_CONNECT_LIST_STORAGE_POOLS_INACTIVE = C.VIR_CONNECT_LIST_STORAGE_POOLS_INACTIVE
+ VIR_CONNECT_LIST_STORAGE_POOLS_ACTIVE = C.VIR_CONNECT_LIST_STORAGE_POOLS_ACTIVE
+ VIR_CONNECT_LIST_STORAGE_POOLS_PERSISTENT = C.VIR_CONNECT_LIST_STORAGE_POOLS_PERSISTENT
+ VIR_CONNECT_LIST_STORAGE_POOLS_TRANSIENT = C.VIR_CONNECT_LIST_STORAGE_POOLS_TRANSIENT
+ VIR_CONNECT_LIST_STORAGE_POOLS_AUTOSTART = C.VIR_CONNECT_LIST_STORAGE_POOLS_AUTOSTART
+ VIR_CONNECT_LIST_STORAGE_POOLS_NO_AUTOSTART = C.VIR_CONNECT_LIST_STORAGE_POOLS_NO_AUTOSTART
+ VIR_CONNECT_LIST_STORAGE_POOLS_DIR = C.VIR_CONNECT_LIST_STORAGE_POOLS_DIR
+ VIR_CONNECT_LIST_STORAGE_POOLS_FS = C.VIR_CONNECT_LIST_STORAGE_POOLS_FS
+ VIR_CONNECT_LIST_STORAGE_POOLS_NETFS = C.VIR_CONNECT_LIST_STORAGE_POOLS_NETFS
+ VIR_CONNECT_LIST_STORAGE_POOLS_LOGICAL = C.VIR_CONNECT_LIST_STORAGE_POOLS_LOGICAL
+ VIR_CONNECT_LIST_STORAGE_POOLS_DISK = C.VIR_CONNECT_LIST_STORAGE_POOLS_DISK
+ VIR_CONNECT_LIST_STORAGE_POOLS_ISCSI = C.VIR_CONNECT_LIST_STORAGE_POOLS_ISCSI
+ VIR_CONNECT_LIST_STORAGE_POOLS_SCSI = C.VIR_CONNECT_LIST_STORAGE_POOLS_SCSI
+ VIR_CONNECT_LIST_STORAGE_POOLS_MPATH = C.VIR_CONNECT_LIST_STORAGE_POOLS_MPATH
+ VIR_CONNECT_LIST_STORAGE_POOLS_RBD = C.VIR_CONNECT_LIST_STORAGE_POOLS_RBD
+ VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG = C.VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG
+ VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER = C.VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER
+)
+
+// virStreamFlags
+const (
+ VIR_STREAM_NONBLOCK = C.VIR_STREAM_NONBLOCK
+)
+
+// virKeycodeSet
+const (
+ VIR_KEYCODE_SET_LINUX = C.VIR_KEYCODE_SET_LINUX
+ VIR_KEYCODE_SET_XT = C.VIR_KEYCODE_SET_XT
+ VIR_KEYCODE_SET_ATSET1 = C.VIR_KEYCODE_SET_ATSET1
+ VIR_KEYCODE_SET_ATSET2 = C.VIR_KEYCODE_SET_ATSET2
+ VIR_KEYCODE_SET_ATSET3 = C.VIR_KEYCODE_SET_ATSET3
+ VIR_KEYCODE_SET_OSX = C.VIR_KEYCODE_SET_OSX
+ VIR_KEYCODE_SET_XT_KBD = C.VIR_KEYCODE_SET_XT_KBD
+ VIR_KEYCODE_SET_USB = C.VIR_KEYCODE_SET_USB
+ VIR_KEYCODE_SET_WIN32 = C.VIR_KEYCODE_SET_WIN32
+ VIR_KEYCODE_SET_RFB = C.VIR_KEYCODE_SET_RFB
+)
+
+// virDomainCreateFlags
+const (
+ VIR_DOMAIN_NONE = C.VIR_DOMAIN_NONE
+ VIR_DOMAIN_START_PAUSED = C.VIR_DOMAIN_START_PAUSED
+ VIR_DOMAIN_START_AUTODESTROY = C.VIR_DOMAIN_START_AUTODESTROY
+ VIR_DOMAIN_START_BYPASS_CACHE = C.VIR_DOMAIN_START_BYPASS_CACHE
+ VIR_DOMAIN_START_FORCE_BOOT = C.VIR_DOMAIN_START_FORCE_BOOT
+)
+
+const VIR_DOMAIN_MEMORY_PARAM_UNLIMITED = C.VIR_DOMAIN_MEMORY_PARAM_UNLIMITED
+
+// virDomainEventID
+const (
+ // event parameter in the callback is of type DomainLifecycleEvent
+ VIR_DOMAIN_EVENT_ID_LIFECYCLE = C.VIR_DOMAIN_EVENT_ID_LIFECYCLE
+
+ // event parameter in the callback is nil
+ VIR_DOMAIN_EVENT_ID_REBOOT = C.VIR_DOMAIN_EVENT_ID_REBOOT
+
+ // event parameter in the callback is of type DomainRTCChangeEvent
+ VIR_DOMAIN_EVENT_ID_RTC_CHANGE = C.VIR_DOMAIN_EVENT_ID_RTC_CHANGE
+
+ // event parameter in the callback is of type DomainWatchdogEvent
+ VIR_DOMAIN_EVENT_ID_WATCHDOG = C.VIR_DOMAIN_EVENT_ID_WATCHDOG
+
+ // event parameter in the callback is of type DomainIOErrorEvent
+ VIR_DOMAIN_EVENT_ID_IO_ERROR = C.VIR_DOMAIN_EVENT_ID_IO_ERROR
+
+ // event parameter in the callback is of type DomainGraphicsEvent
+ VIR_DOMAIN_EVENT_ID_GRAPHICS = C.VIR_DOMAIN_EVENT_ID_GRAPHICS
+
+ // virConnectDomainEventIOErrorReasonCallback
+ VIR_DOMAIN_EVENT_ID_IO_ERROR_REASON = C.VIR_DOMAIN_EVENT_ID_IO_ERROR_REASON
+
+ // event parameter in the callback is nil
+ VIR_DOMAIN_EVENT_ID_CONTROL_ERROR = C.VIR_DOMAIN_EVENT_ID_CONTROL_ERROR
+
+ // event parameter in the callback is of type DomainBlockJobEvent
+ VIR_DOMAIN_EVENT_ID_BLOCK_JOB = C.VIR_DOMAIN_EVENT_ID_BLOCK_JOB
+
+ // event parameter in the callback is of type DomainDiskChangeEvent
+ VIR_DOMAIN_EVENT_ID_DISK_CHANGE = C.VIR_DOMAIN_EVENT_ID_DISK_CHANGE
+
+ // event parameter in the callback is of type DomainTrayChangeEvent
+ VIR_DOMAIN_EVENT_ID_TRAY_CHANGE = C.VIR_DOMAIN_EVENT_ID_TRAY_CHANGE
+
+ // event parameter in the callback is of type DomainReasonEvent
+ VIR_DOMAIN_EVENT_ID_PMWAKEUP = C.VIR_DOMAIN_EVENT_ID_PMWAKEUP
+
+ // event parameter in the callback is of type DomainReasonEvent
+ VIR_DOMAIN_EVENT_ID_PMSUSPEND = C.VIR_DOMAIN_EVENT_ID_PMSUSPEND
+
+ // event parameter in the callback is of type DomainBalloonChangeEvent
+ VIR_DOMAIN_EVENT_ID_BALLOON_CHANGE = C.VIR_DOMAIN_EVENT_ID_BALLOON_CHANGE
+
+ // event parameter in the callback is of type DomainReasonEvent
+ VIR_DOMAIN_EVENT_ID_PMSUSPEND_DISK = C.VIR_DOMAIN_EVENT_ID_PMSUSPEND_DISK
+
+ // event parameter in the callback is of type DomainDeviceRemovedEvent
+ VIR_DOMAIN_EVENT_ID_DEVICE_REMOVED = C.VIR_DOMAIN_EVENT_ID_DEVICE_REMOVED
+
+ // TODO Post 1.2.4, enable later
+ // event parameter in the callback is of type DomainBlockJobEvent
+ // VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2 = C.VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2
+)
+
+// virDomainEventType
+const (
+ VIR_DOMAIN_EVENT_DEFINED = C.VIR_DOMAIN_EVENT_DEFINED
+ VIR_DOMAIN_EVENT_UNDEFINED = C.VIR_DOMAIN_EVENT_UNDEFINED
+ VIR_DOMAIN_EVENT_STARTED = C.VIR_DOMAIN_EVENT_STARTED
+ VIR_DOMAIN_EVENT_SUSPENDED = C.VIR_DOMAIN_EVENT_SUSPENDED
+ VIR_DOMAIN_EVENT_RESUMED = C.VIR_DOMAIN_EVENT_RESUMED
+ VIR_DOMAIN_EVENT_STOPPED = C.VIR_DOMAIN_EVENT_STOPPED
+ VIR_DOMAIN_EVENT_SHUTDOWN = C.VIR_DOMAIN_EVENT_SHUTDOWN
+ VIR_DOMAIN_EVENT_PMSUSPENDED = C.VIR_DOMAIN_EVENT_PMSUSPENDED
+ VIR_DOMAIN_EVENT_CRASHED = C.VIR_DOMAIN_EVENT_CRASHED
+)
+
+// virDomainEventWatchdogAction:
+// The action that is to be taken due to the watchdog device firing
+const (
+ // No action, watchdog ignored
+ VIR_DOMAIN_EVENT_WATCHDOG_NONE = C.VIR_DOMAIN_EVENT_WATCHDOG_NONE
+
+ // Guest CPUs are paused
+ VIR_DOMAIN_EVENT_WATCHDOG_PAUSE = C.VIR_DOMAIN_EVENT_WATCHDOG_PAUSE
+
+ // Guest CPUs are reset
+ VIR_DOMAIN_EVENT_WATCHDOG_RESET = C.VIR_DOMAIN_EVENT_WATCHDOG_RESET
+
+ // Guest is forcibly powered off
+ VIR_DOMAIN_EVENT_WATCHDOG_POWEROFF = C.VIR_DOMAIN_EVENT_WATCHDOG_POWEROFF
+
+ // Guest is requested to gracefully shutdown
+ VIR_DOMAIN_EVENT_WATCHDOG_SHUTDOWN = C.VIR_DOMAIN_EVENT_WATCHDOG_SHUTDOWN
+
+ // No action, a debug message logged
+ VIR_DOMAIN_EVENT_WATCHDOG_DEBUG = C.VIR_DOMAIN_EVENT_WATCHDOG_DEBUG
+)
+
+// virDomainEventIOErrorAction
+// The action that is to be taken due to an IO error occurring
+const (
+ // No action, IO error ignored
+ VIR_DOMAIN_EVENT_IO_ERROR_NONE = C.VIR_DOMAIN_EVENT_IO_ERROR_NONE
+
+ // Guest CPUs are paused
+ VIR_DOMAIN_EVENT_IO_ERROR_PAUSE = C.VIR_DOMAIN_EVENT_IO_ERROR_PAUSE
+
+ // IO error reported to guest OS
+ VIR_DOMAIN_EVENT_IO_ERROR_REPORT = C.VIR_DOMAIN_EVENT_IO_ERROR_REPORT
+)
+
+// virDomainEventGraphicsPhase
+// The phase of the graphics client connection
+const (
+ // Initial socket connection established
+ VIR_DOMAIN_EVENT_GRAPHICS_CONNECT = C.VIR_DOMAIN_EVENT_GRAPHICS_CONNECT
+
+ // Authentication & setup completed
+ VIR_DOMAIN_EVENT_GRAPHICS_INITIALIZE = C.VIR_DOMAIN_EVENT_GRAPHICS_INITIALIZE
+
+ // Final socket disconnection
+ VIR_DOMAIN_EVENT_GRAPHICS_DISCONNECT = C.VIR_DOMAIN_EVENT_GRAPHICS_DISCONNECT
+)
+
+// virDomainEventGraphicsAddressType
+const (
+ // IPv4 address
+ VIR_DOMAIN_EVENT_GRAPHICS_ADDRESS_IPV4 = C.VIR_DOMAIN_EVENT_GRAPHICS_ADDRESS_IPV4
+
+ // IPv6 address
+ VIR_DOMAIN_EVENT_GRAPHICS_ADDRESS_IPV6 = C.VIR_DOMAIN_EVENT_GRAPHICS_ADDRESS_IPV6
+
+ // UNIX socket path
+ VIR_DOMAIN_EVENT_GRAPHICS_ADDRESS_UNIX = C.VIR_DOMAIN_EVENT_GRAPHICS_ADDRESS_UNIX
+)
+
+// virDomainBlockJobType
+const (
+ // Placeholder
+ VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN = C.VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN
+
+ // Block Pull (virDomainBlockPull, or virDomainBlockRebase without
+ // flags), job ends on completion
+ VIR_DOMAIN_BLOCK_JOB_TYPE_PULL = C.VIR_DOMAIN_BLOCK_JOB_TYPE_PULL
+
+ // Block Copy (virDomainBlockCopy, or virDomainBlockRebase with
+ // flags), job exists as long as mirroring is active
+ VIR_DOMAIN_BLOCK_JOB_TYPE_COPY = C.VIR_DOMAIN_BLOCK_JOB_TYPE_COPY
+
+ // Block Commit (virDomainBlockCommit without flags), job ends on
+ // completion
+ VIR_DOMAIN_BLOCK_JOB_TYPE_COMMIT = C.VIR_DOMAIN_BLOCK_JOB_TYPE_COMMIT
+
+ // TODO Post 1.2.4, enable later
+ // Active Block Commit (virDomainBlockCommit with flags), job
+ // exists as long as sync is active
+ // VIR_DOMAIN_BLOCK_JOB_TYPE_ACTIVE_COMMIT = C.VIR_DOMAIN_BLOCK_JOB_TYPE_ACTIVE_COMMIT
+)
+
+// virConnectDomainEventBlockJobStatus
+const (
+ VIR_DOMAIN_BLOCK_JOB_COMPLETED = C.VIR_DOMAIN_BLOCK_JOB_COMPLETED
+ VIR_DOMAIN_BLOCK_JOB_FAILED = C.VIR_DOMAIN_BLOCK_JOB_FAILED
+ VIR_DOMAIN_BLOCK_JOB_CANCELED = C.VIR_DOMAIN_BLOCK_JOB_CANCELED
+ VIR_DOMAIN_BLOCK_JOB_READY = C.VIR_DOMAIN_BLOCK_JOB_READY
+)
+
+// virConnectDomainEventDiskChangeReason
+const (
+ // OldSrcPath is set
+ VIR_DOMAIN_EVENT_DISK_CHANGE_MISSING_ON_START = C.VIR_DOMAIN_EVENT_DISK_CHANGE_MISSING_ON_START
+ VIR_DOMAIN_EVENT_DISK_DROP_MISSING_ON_START = C.VIR_DOMAIN_EVENT_DISK_DROP_MISSING_ON_START
+)
+
+// virConnectDomainEventTrayChangeReason
+const (
+ VIR_DOMAIN_EVENT_TRAY_CHANGE_OPEN = C.VIR_DOMAIN_EVENT_TRAY_CHANGE_OPEN
+ VIR_DOMAIN_EVENT_TRAY_CHANGE_CLOSE = C.VIR_DOMAIN_EVENT_TRAY_CHANGE_CLOSE
+)
+
+// virDomainRunningReason
+const (
+ VIR_DOMAIN_RUNNING_UNKNOWN = C.VIR_DOMAIN_RUNNING_UNKNOWN
+ VIR_DOMAIN_RUNNING_BOOTED = C.VIR_DOMAIN_RUNNING_BOOTED /* normal startup from boot */
+ VIR_DOMAIN_RUNNING_MIGRATED = C.VIR_DOMAIN_RUNNING_MIGRATED /* migrated from another host */
+ VIR_DOMAIN_RUNNING_RESTORED = C.VIR_DOMAIN_RUNNING_RESTORED /* restored from a state file */
+ VIR_DOMAIN_RUNNING_FROM_SNAPSHOT = C.VIR_DOMAIN_RUNNING_FROM_SNAPSHOT /* restored from snapshot */
+ VIR_DOMAIN_RUNNING_UNPAUSED = C.VIR_DOMAIN_RUNNING_UNPAUSED /* returned from paused state */
+ VIR_DOMAIN_RUNNING_MIGRATION_CANCELED = C.VIR_DOMAIN_RUNNING_MIGRATION_CANCELED /* returned from migration */
+ VIR_DOMAIN_RUNNING_SAVE_CANCELED = C.VIR_DOMAIN_RUNNING_SAVE_CANCELED /* returned from failed save process */
+ VIR_DOMAIN_RUNNING_WAKEUP = C.VIR_DOMAIN_RUNNING_WAKEUP /* returned from pmsuspended due to wakeup event */
+ VIR_DOMAIN_RUNNING_CRASHED = C.VIR_DOMAIN_RUNNING_CRASHED /* resumed from crashed */
+)
+
+// virDomainPausedReason
+const (
+ VIR_DOMAIN_PAUSED_UNKNOWN = C.VIR_DOMAIN_PAUSED_UNKNOWN /* the reason is unknown */
+ VIR_DOMAIN_PAUSED_USER = C.VIR_DOMAIN_PAUSED_USER /* paused on user request */
+ VIR_DOMAIN_PAUSED_MIGRATION = C.VIR_DOMAIN_PAUSED_MIGRATION /* paused for offline migration */
+ VIR_DOMAIN_PAUSED_SAVE = C.VIR_DOMAIN_PAUSED_SAVE /* paused for save */
+ VIR_DOMAIN_PAUSED_DUMP = C.VIR_DOMAIN_PAUSED_DUMP /* paused for offline core dump */
+ VIR_DOMAIN_PAUSED_IOERROR = C.VIR_DOMAIN_PAUSED_IOERROR /* paused due to a disk I/O error */
+ VIR_DOMAIN_PAUSED_WATCHDOG = C.VIR_DOMAIN_PAUSED_WATCHDOG /* paused due to a watchdog event */
+ VIR_DOMAIN_PAUSED_FROM_SNAPSHOT = C.VIR_DOMAIN_PAUSED_FROM_SNAPSHOT /* paused after restoring from snapshot */
+ VIR_DOMAIN_PAUSED_SHUTTING_DOWN = C.VIR_DOMAIN_PAUSED_SHUTTING_DOWN /* paused during shutdown process */
+ VIR_DOMAIN_PAUSED_SNAPSHOT = C.VIR_DOMAIN_PAUSED_SNAPSHOT /* paused while creating a snapshot */
+ VIR_DOMAIN_PAUSED_CRASHED = C.VIR_DOMAIN_PAUSED_CRASHED /* paused due to a guest crash */
+)
+
+// virDomainXMLFlags
+const (
+ VIR_DOMAIN_XML_SECURE = C.VIR_DOMAIN_XML_SECURE /* dump security sensitive information too */
+ VIR_DOMAIN_XML_INACTIVE = C.VIR_DOMAIN_XML_INACTIVE /* dump inactive domain information */
+ VIR_DOMAIN_XML_UPDATE_CPU = C.VIR_DOMAIN_XML_UPDATE_CPU /* update guest CPU requirements according to host CPU */
+ VIR_DOMAIN_XML_MIGRATABLE = C.VIR_DOMAIN_XML_MIGRATABLE /* dump XML suitable for migration */
+)
+
+/*
+ * QEMU's monitor speaks two different protocols. One is the legacy HMP
+ * (the 'human' monitor protocol); the default is QMP, which is all-JSON.
+ *
+ * QMP JSON commands are of the format:
+ *   {"execute" : "query-cpus"}
+ *
+ * whereas the same command in HMP would be:
+ *   'info cpus'
+ */
+const (
+ VIR_DOMAIN_QEMU_MONITOR_COMMAND_DEFAULT = 0
+ VIR_DOMAIN_QEMU_MONITOR_COMMAND_HMP = (1 << 0)
+)
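
A short sketch contrasting the two flags via the QemuMonitorCommand binding defined in domain.go below (d is assumed to be a valid *VirDomain):

```go
// queryCPUs issues the same query in both monitor protocols.
func queryCPUs(d *VirDomain) (string, string, error) {
	// QMP (the default) expects all-JSON commands.
	qmp, err := d.QemuMonitorCommand(
		VIR_DOMAIN_QEMU_MONITOR_COMMAND_DEFAULT, `{"execute": "query-cpus"}`)
	if err != nil {
		return "", "", err
	}
	// HMP takes the legacy human-readable form of the same command.
	hmp, err := d.QemuMonitorCommand(
		VIR_DOMAIN_QEMU_MONITOR_COMMAND_HMP, "info cpus")
	return qmp, hmp, err
}
```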
+
+// virDomainInterfaceAddressesSource
+const (
+ VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE = 0
+ VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT = 1
+)
+
+// virIPAddrType
+const (
+ VIR_IP_ADDR_TYPE_IPV4 = 0
+ VIR_IP_ADDR_TYPE_IPV6 = 1
+)
+
+// virDomainQemuAgentCommandTimeoutValues
+const (
+ VIR_DOMAIN_QEMU_AGENT_COMMAND_MIN = -2
+ VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK = -2
+ VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT = -1
+ VIR_DOMAIN_QEMU_AGENT_COMMAND_NOWAIT = 0
+ VIR_DOMAIN_QEMU_AGENT_COMMAND_SHUTDOWN = 60
+)
diff --git a/vendor/github.com/dmacvicar/libvirt-go/domain.go b/vendor/github.com/dmacvicar/libvirt-go/domain.go
new file mode 100644
index 00000000..227a9bac
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/domain.go
@@ -0,0 +1,784 @@
+package libvirt
+
+/*
+#cgo LDFLAGS: -lvirt-qemu -lvirt
+#include <libvirt/libvirt.h>
+#include <libvirt/libvirt-qemu.h>
+#include <libvirt/virterror.h>
+#include <stdlib.h>
+*/
+import "C"
+
+import (
+ "reflect"
+ "strings"
+ "unsafe"
+)
+
+type VirDomain struct {
+ ptr C.virDomainPtr
+}
+
+type VirDomainBlockInfo struct {
+ ptr C.virDomainBlockInfo
+}
+
+type VirDomainInfo struct {
+ ptr C.virDomainInfo
+}
+
+type VirTypedParameter struct {
+ Name string
+ Value interface{}
+}
+
+type VirDomainMemoryStat struct {
+ Tag int32
+ Val uint64
+}
+
+type VirVcpuInfo struct {
+ Number uint32
+ State int32
+ CpuTime uint64
+ Cpu int32
+}
+
+type VirTypedParameters []VirTypedParameter
+
+func (dest *VirTypedParameters) loadFromCPtr(params C.virTypedParameterPtr, nParams int) {
+ // reset slice
+ *dest = VirTypedParameters{}
+
+ // transform that C array to a go slice
+ hdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(params)),
+ Len: int(nParams),
+ Cap: int(nParams),
+ }
+ rawParams := *(*[]C.struct__virTypedParameter)(unsafe.Pointer(&hdr))
+
+ // there is probably a more elegant way to deal with that union
+ for _, rawParam := range rawParams {
+ name := C.GoStringN(&rawParam.field[0], C.VIR_TYPED_PARAM_FIELD_LENGTH)
+ if nbIdx := strings.Index(name, "\x00"); nbIdx != -1 {
+ name = name[:nbIdx]
+ }
+ switch rawParam._type {
+ case C.VIR_TYPED_PARAM_INT:
+ *dest = append(*dest, VirTypedParameter{name, int(*(*C.int)(unsafe.Pointer(&rawParam.value[0])))})
+ case C.VIR_TYPED_PARAM_UINT:
+ *dest = append(*dest, VirTypedParameter{name, uint32(*(*C.uint)(unsafe.Pointer(&rawParam.value[0])))})
+ case C.VIR_TYPED_PARAM_LLONG:
+ *dest = append(*dest, VirTypedParameter{name, int64(*(*C.longlong)(unsafe.Pointer(&rawParam.value[0])))})
+ case C.VIR_TYPED_PARAM_ULLONG:
+ *dest = append(*dest, VirTypedParameter{name, uint64(*(*C.ulonglong)(unsafe.Pointer(&rawParam.value[0])))})
+ case C.VIR_TYPED_PARAM_DOUBLE:
+ *dest = append(*dest, VirTypedParameter{name, float64(*(*C.double)(unsafe.Pointer(&rawParam.value[0])))})
+ case C.VIR_TYPED_PARAM_BOOLEAN:
+ if int(*(*C.char)(unsafe.Pointer(&rawParam.value[0]))) == 1 {
+ *dest = append(*dest, VirTypedParameter{name, true})
+ } else {
+ *dest = append(*dest, VirTypedParameter{name, false})
+ }
+ case C.VIR_TYPED_PARAM_STRING:
+ *dest = append(*dest, VirTypedParameter{name, C.GoString((*C.char)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&rawParam.value[0])))))})
+ }
+ }
+}
+
+func (d *VirDomain) Free() error {
+ if result := C.virDomainFree(d.ptr); result != 0 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) Create() error {
+ result := C.virDomainCreate(d.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) CreateWithFlags(flags uint) error {
+ result := C.virDomainCreateWithFlags(d.ptr, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) Destroy() error {
+ result := C.virDomainDestroy(d.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) Shutdown() error {
+ result := C.virDomainShutdown(d.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) Reboot(flags uint) error {
+ result := C.virDomainReboot(d.ptr, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) IsActive() (bool, error) {
+ result := C.virDomainIsActive(d.ptr)
+ if result == -1 {
+ return false, GetLastError()
+ }
+ if result == 1 {
+ return true, nil
+ }
+ return false, nil
+}
+
+func (d *VirDomain) IsPersistent() (bool, error) {
+ result := C.virDomainIsPersistent(d.ptr)
+ if result == -1 {
+ return false, GetLastError()
+ }
+ if result == 1 {
+ return true, nil
+ }
+ return false, nil
+}
+
+func (d *VirDomain) SetAutostart(autostart bool) error {
+ var cAutostart C.int
+ switch autostart {
+ case true:
+ cAutostart = 1
+ default:
+ cAutostart = 0
+ }
+ result := C.virDomainSetAutostart(d.ptr, cAutostart)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) GetAutostart() (bool, error) {
+ var out C.int
+ result := C.virDomainGetAutostart(d.ptr, (*C.int)(unsafe.Pointer(&out)))
+ if result == -1 {
+ return false, GetLastError()
+ }
+ switch out {
+ case 1:
+ return true, nil
+ default:
+ return false, nil
+ }
+}
+
+func (d *VirDomain) GetBlockInfo(disk string, flag uint) (VirDomainBlockInfo, error) {
+ bi := VirDomainBlockInfo{}
+ var ptr C.virDomainBlockInfo
+ cDisk := C.CString(disk)
+ defer C.free(unsafe.Pointer(cDisk))
+ result := C.virDomainGetBlockInfo(d.ptr, cDisk, (*C.virDomainBlockInfo)(unsafe.Pointer(&ptr)), C.uint(flag))
+ if result == -1 {
+ return bi, GetLastError()
+ }
+ bi.ptr = ptr
+ return bi, nil
+}
+
+func (b *VirDomainBlockInfo) Allocation() uint64 {
+ return uint64(b.ptr.allocation)
+}
+
+func (b *VirDomainBlockInfo) Capacity() uint64 {
+ return uint64(b.ptr.capacity)
+}
+
+func (b *VirDomainBlockInfo) Physical() uint64 {
+ return uint64(b.ptr.physical)
+}
+
+func (d *VirDomain) GetName() (string, error) {
+ name := C.virDomainGetName(d.ptr)
+ if name == nil {
+ return "", GetLastError()
+ }
+ return C.GoString(name), nil
+}
+
+func (d *VirDomain) GetState() ([]int, error) {
+ var cState C.int
+ var cReason C.int
+ result := C.virDomainGetState(d.ptr,
+ (*C.int)(unsafe.Pointer(&cState)),
+ (*C.int)(unsafe.Pointer(&cReason)),
+ 0)
+ if int(result) == -1 {
+ return []int{}, GetLastError()
+ }
+ return []int{int(cState), int(cReason)}, nil
+}
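
GetState packs the result as [state, reason]; a sketch of decoding it against the virDomainState constants from constants.go:

```go
// isRunning reports whether the domain is in the RUNNING state.
func isRunning(d *VirDomain) (bool, error) {
	st, err := d.GetState()
	if err != nil {
		return false, err
	}
	// st[0] is the virDomainState value, st[1] the reason code.
	return st[0] == VIR_DOMAIN_RUNNING, nil
}
```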
+
+func (d *VirDomain) GetID() (uint, error) {
+ id := uint(C.virDomainGetID(d.ptr))
+ if id == ^uint(0) {
+ return id, GetLastError()
+ }
+ return id, nil
+}
+
+func (d *VirDomain) GetUUID() ([]byte, error) {
+ var cUuid [C.VIR_UUID_BUFLEN](byte)
+ cuidPtr := unsafe.Pointer(&cUuid)
+ result := C.virDomainGetUUID(d.ptr, (*C.uchar)(cuidPtr))
+ if result != 0 {
+ return []byte{}, GetLastError()
+ }
+ return C.GoBytes(cuidPtr, C.VIR_UUID_BUFLEN), nil
+}
+
+func (d *VirDomain) GetUUIDString() (string, error) {
+ var cUuid [C.VIR_UUID_STRING_BUFLEN](C.char)
+ cuidPtr := unsafe.Pointer(&cUuid)
+ result := C.virDomainGetUUIDString(d.ptr, (*C.char)(cuidPtr))
+ if result != 0 {
+ return "", GetLastError()
+ }
+ return C.GoString((*C.char)(cuidPtr)), nil
+}
+
+func (d *VirDomain) GetInfo() (VirDomainInfo, error) {
+ di := VirDomainInfo{}
+ var ptr C.virDomainInfo
+ result := C.virDomainGetInfo(d.ptr, (*C.virDomainInfo)(unsafe.Pointer(&ptr)))
+ if result == -1 {
+ return di, GetLastError()
+ }
+ di.ptr = ptr
+ return di, nil
+}
+
+func (d *VirDomain) GetXMLDesc(flags uint32) (string, error) {
+ result := C.virDomainGetXMLDesc(d.ptr, C.uint(flags))
+ if result == nil {
+ return "", GetLastError()
+ }
+ xml := C.GoString(result)
+ C.free(unsafe.Pointer(result))
+ return xml, nil
+}
+
+func (i *VirDomainInfo) GetState() uint8 {
+ return uint8(i.ptr.state)
+}
+
+func (i *VirDomainInfo) GetMaxMem() uint64 {
+ return uint64(i.ptr.maxMem)
+}
+
+func (i *VirDomainInfo) GetMemory() uint64 {
+ return uint64(i.ptr.memory)
+}
+
+func (i *VirDomainInfo) GetNrVirtCpu() uint16 {
+ return uint16(i.ptr.nrVirtCpu)
+}
+
+func (i *VirDomainInfo) GetCpuTime() uint64 {
+ return uint64(i.ptr.cpuTime)
+}
+
+func (d *VirDomain) GetCPUStats(params *VirTypedParameters, nParams int, startCpu int, nCpus uint32, flags uint32) (int, error) {
+ var cParams C.virTypedParameterPtr
+ var cParamsLen int
+
+ cParamsLen = int(nCpus) * nParams
+
+ if params != nil && cParamsLen > 0 {
+ cParams = (C.virTypedParameterPtr)(C.calloc(C.size_t(cParamsLen), C.size_t(unsafe.Sizeof(C.struct__virTypedParameter{}))))
+ defer C.virTypedParamsFree(cParams, C.int(cParamsLen))
+ } else {
+ cParamsLen = 0
+ cParams = nil
+ }
+
+ result := int(C.virDomainGetCPUStats(d.ptr, (C.virTypedParameterPtr)(cParams), C.uint(nParams), C.int(startCpu), C.uint(nCpus), C.uint(flags)))
+ if result == -1 {
+ return result, GetLastError()
+ }
+
+ if cParamsLen > 0 {
+ params.loadFromCPtr(cParams, cParamsLen)
+ }
+
+ return result, nil
+}
+
+// Warning: No test written for this function
+func (d *VirDomain) GetInterfaceParameters(device string, params *VirTypedParameters, nParams *int, flags uint32) (int, error) {
+ var cParams C.virTypedParameterPtr
+
+ if params != nil && *nParams > 0 {
+ cParams = (C.virTypedParameterPtr)(C.calloc(C.size_t(*nParams), C.size_t(unsafe.Sizeof(C.struct__virTypedParameter{}))))
+ defer C.virTypedParamsFree(cParams, C.int(*nParams))
+ } else {
+ cParams = nil
+ }
+
+	cDevice := C.CString(device)
+	defer C.free(unsafe.Pointer(cDevice))
+	result := int(C.virDomainGetInterfaceParameters(d.ptr, cDevice, (C.virTypedParameterPtr)(cParams), (*C.int)(unsafe.Pointer(nParams)), C.uint(flags)))
+ if result == -1 {
+ return result, GetLastError()
+ }
+
+ if params != nil && *nParams > 0 {
+ params.loadFromCPtr(cParams, *nParams)
+ }
+
+ return result, nil
+}
+
+func (d *VirDomain) GetMetadata(tipus int, uri string, flags uint32) (string, error) {
+ var cUri *C.char
+ if uri != "" {
+ cUri = C.CString(uri)
+ defer C.free(unsafe.Pointer(cUri))
+ }
+
+ result := C.virDomainGetMetadata(d.ptr, C.int(tipus), cUri, C.uint(flags))
+ if result == nil {
+ return "", GetLastError()
+
+ }
+ defer C.free(unsafe.Pointer(result))
+ return C.GoString(result), nil
+}
+
+func (d *VirDomain) SetMetadata(metaDataType int, metaDataCont, uriKey, uri string, flags uint32) error {
+ var cMetaDataCont *C.char
+ var cUriKey *C.char
+ var cUri *C.char
+
+ cMetaDataCont = C.CString(metaDataCont)
+ defer C.free(unsafe.Pointer(cMetaDataCont))
+
+ if metaDataType == VIR_DOMAIN_METADATA_ELEMENT {
+ cUriKey = C.CString(uriKey)
+ defer C.free(unsafe.Pointer(cUriKey))
+ cUri = C.CString(uri)
+ defer C.free(unsafe.Pointer(cUri))
+ }
+ result := C.virDomainSetMetadata(d.ptr, C.int(metaDataType), cMetaDataCont, cUriKey, cUri, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) Undefine() error {
+ result := C.virDomainUndefine(d.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) SetMaxMemory(memory uint) error {
+ result := C.virDomainSetMaxMemory(d.ptr, C.ulong(memory))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) SetMemory(memory uint64) error {
+ result := C.virDomainSetMemory(d.ptr, C.ulong(memory))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) SetMemoryFlags(memory uint64, flags uint32) error {
+ result := C.virDomainSetMemoryFlags(d.ptr, C.ulong(memory), C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) SetMemoryStatsPeriod(period int, flags uint) error {
+ result := C.virDomainSetMemoryStatsPeriod(d.ptr, C.int(period), C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) SetVcpus(vcpu uint) error {
+ result := C.virDomainSetVcpus(d.ptr, C.uint(vcpu))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) SetVcpusFlags(vcpu uint, flags uint) error {
+ result := C.virDomainSetVcpusFlags(d.ptr, C.uint(vcpu), C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) Suspend() error {
+ result := C.virDomainSuspend(d.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) Resume() error {
+ result := C.virDomainResume(d.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) AbortJob() error {
+ result := C.virDomainAbortJob(d.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) DestroyFlags(flags uint) error {
+ result := C.virDomainDestroyFlags(d.ptr, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) ShutdownFlags(flags uint) error {
+ result := C.virDomainShutdownFlags(d.ptr, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) UndefineFlags(flags uint) error {
+ result := C.virDomainUndefineFlags(d.ptr, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) AttachDevice(xml string) error {
+ cXml := C.CString(xml)
+ defer C.free(unsafe.Pointer(cXml))
+ result := C.virDomainAttachDevice(d.ptr, cXml)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) AttachDeviceFlags(xml string, flags uint) error {
+ cXml := C.CString(xml)
+ defer C.free(unsafe.Pointer(cXml))
+ result := C.virDomainAttachDeviceFlags(d.ptr, cXml, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) DetachDevice(xml string) error {
+ cXml := C.CString(xml)
+ defer C.free(unsafe.Pointer(cXml))
+ result := C.virDomainDetachDevice(d.ptr, cXml)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) DetachDeviceFlags(xml string, flags uint) error {
+ cXml := C.CString(xml)
+ defer C.free(unsafe.Pointer(cXml))
+ result := C.virDomainDetachDeviceFlags(d.ptr, cXml, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) UpdateDeviceFlags(xml string, flags uint) error {
+ cXml := C.CString(xml)
+ defer C.free(unsafe.Pointer(cXml))
+ result := C.virDomainUpdateDeviceFlags(d.ptr, cXml, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) Screenshot(stream *VirStream, screen, flags uint) (string, error) {
+ cType := C.virDomainScreenshot(d.ptr, stream.ptr, C.uint(screen), C.uint(flags))
+ if cType == nil {
+ return "", GetLastError()
+ }
+ defer C.free(unsafe.Pointer(cType))
+
+ mimeType := C.GoString(cType)
+ return mimeType, nil
+}
+
+func (d *VirDomain) SendKey(codeset, holdtime uint, keycodes []uint, flags uint) error {
+ result := C.virDomainSendKey(d.ptr, C.uint(codeset), C.uint(holdtime), (*C.uint)(unsafe.Pointer(&keycodes[0])), C.int(len(keycodes)), C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+
+ return nil
+}
+
+func (d *VirDomain) BlockStatsFlags(disk string, params *VirTypedParameters, nParams int, flags uint32) (int, error) {
+ var cParams C.virTypedParameterPtr
+ cDisk := C.CString(disk)
+ defer C.free(unsafe.Pointer(cDisk))
+
+ cParamsLen := C.int(nParams)
+
+ if params != nil && nParams > 0 {
+ cParams = (C.virTypedParameterPtr)(C.calloc(C.size_t(nParams), C.size_t(unsafe.Sizeof(C.struct__virTypedParameter{}))))
+ defer C.virTypedParamsFree(cParams, cParamsLen)
+ } else {
+ cParams = nil
+ }
+
+ result := int(C.virDomainBlockStatsFlags(d.ptr, cDisk, (C.virTypedParameterPtr)(cParams), &cParamsLen, C.uint(flags)))
+ if result == -1 {
+ return result, GetLastError()
+ }
+
+ if cParamsLen > 0 && params != nil {
+ params.loadFromCPtr(cParams, nParams)
+ }
+
+ return int(cParamsLen), nil
+}
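
The typed-parameter wrappers follow libvirt's two-call convention: a first call with nil params asks how many parameters the hypervisor exposes, and a second call fetches them. A sketch inside this package (the disk name "vda" is an assumption):

```go
// diskStats fetches all typed block statistics for one disk.
func diskStats(d *VirDomain) (VirTypedParameters, error) {
	// First call: nil params makes libvirt report the parameter count.
	n, err := d.BlockStatsFlags("vda", nil, 0, 0)
	if err != nil {
		return nil, err
	}
	// Second call: allocate and fill n typed parameters.
	params := VirTypedParameters{}
	if _, err := d.BlockStatsFlags("vda", &params, n, 0); err != nil {
		return nil, err
	}
	return params, nil
}
```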
+
+type VirDomainBlockStats struct {
+ RdReq int64
+ WrReq int64
+ RdBytes int64
+ WrBytes int64
+}
+
+type VirDomainInterfaceStats struct {
+ RxBytes int64
+ RxPackets int64
+ RxErrs int64
+ RxDrop int64
+ TxBytes int64
+ TxPackets int64
+ TxErrs int64
+ TxDrop int64
+}
+
+func (d *VirDomain) BlockStats(path string) (VirDomainBlockStats, error) {
+ cPath := C.CString(path)
+ defer C.free(unsafe.Pointer(cPath))
+
+ size := C.size_t(unsafe.Sizeof(C.struct__virDomainBlockStats{}))
+
+ cStats := (C.virDomainBlockStatsPtr)(C.malloc(size))
+ defer C.free(unsafe.Pointer(cStats))
+
+ result := C.virDomainBlockStats(d.ptr, cPath, (C.virDomainBlockStatsPtr)(cStats), size)
+
+ if result != 0 {
+ return VirDomainBlockStats{}, GetLastError()
+ }
+ return VirDomainBlockStats{
+ WrReq: int64(cStats.wr_req),
+ RdReq: int64(cStats.rd_req),
+ RdBytes: int64(cStats.rd_bytes),
+ WrBytes: int64(cStats.wr_bytes),
+ }, nil
+}
+
+func (d *VirDomain) InterfaceStats(path string) (VirDomainInterfaceStats, error) {
+ cPath := C.CString(path)
+ defer C.free(unsafe.Pointer(cPath))
+
+ size := C.size_t(unsafe.Sizeof(C.struct__virDomainInterfaceStats{}))
+
+ cStats := (C.virDomainInterfaceStatsPtr)(C.malloc(size))
+ defer C.free(unsafe.Pointer(cStats))
+
+ result := C.virDomainInterfaceStats(d.ptr, cPath, (C.virDomainInterfaceStatsPtr)(cStats), size)
+
+ if result != 0 {
+ return VirDomainInterfaceStats{}, GetLastError()
+ }
+ return VirDomainInterfaceStats{
+ RxBytes: int64(cStats.rx_bytes),
+ RxPackets: int64(cStats.rx_packets),
+ RxErrs: int64(cStats.rx_errs),
+ RxDrop: int64(cStats.rx_drop),
+ TxBytes: int64(cStats.tx_bytes),
+ TxPackets: int64(cStats.tx_packets),
+ TxErrs: int64(cStats.tx_errs),
+ TxDrop: int64(cStats.tx_drop),
+ }, nil
+}
+
+func (d *VirDomain) MemoryStats(nrStats uint32, flags uint32) ([]VirDomainMemoryStat, error) {
+ ptr := make([]C.virDomainMemoryStatStruct, nrStats)
+
+ result := C.virDomainMemoryStats(
+ d.ptr, (C.virDomainMemoryStatPtr)(unsafe.Pointer(&ptr[0])),
+ C.uint(nrStats), C.uint(flags))
+
+ if result == -1 {
+ return []VirDomainMemoryStat{}, GetLastError()
+ }
+
+	out := make([]VirDomainMemoryStat, 0, int(result))
+ for i := 0; i < int(result); i++ {
+ out = append(out, VirDomainMemoryStat{
+ Tag: int32(ptr[i].tag),
+ Val: uint64(ptr[i].val),
+ })
+ }
+ return out, nil
+}
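
A usage sketch for MemoryStats; the Tag field identifies which statistic each entry carries. The upper bound of 8 below is an arbitrary assumption (libvirt defines its own stat-count ceiling, which these bindings do not expose), and the fmt import is assumed:

```go
// dumpMemStats prints whatever memory statistics the hypervisor reports.
func dumpMemStats(d *VirDomain) error {
	stats, err := d.MemoryStats(8, 0) // 8 = assumed upper bound on tags
	if err != nil {
		return err
	}
	for _, s := range stats {
		fmt.Printf("tag=%d val=%d\n", s.Tag, s.Val)
	}
	return nil
}
```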
+
+func (d *VirDomain) GetVcpus(maxInfo int32) ([]VirVcpuInfo, error) {
+ ptr := make([]C.virVcpuInfo, maxInfo)
+
+ result := C.virDomainGetVcpus(
+ d.ptr, (C.virVcpuInfoPtr)(unsafe.Pointer(&ptr[0])),
+ C.int(maxInfo), nil, C.int(0))
+
+ if result == -1 {
+ return []VirVcpuInfo{}, GetLastError()
+ }
+
+ out := make([]VirVcpuInfo, 0)
+ for i := 0; i < int(result); i++ {
+ out = append(out, VirVcpuInfo{
+ Number: uint32(ptr[i].number),
+ State: int32(ptr[i].state),
+ CpuTime: uint64(ptr[i].cpuTime),
+ Cpu: int32(ptr[i].cpu),
+ })
+ }
+
+ return out, nil
+}
+
+func (d *VirDomain) GetVcpusFlags(flags uint32) (int32, error) {
+ result := C.virDomainGetVcpusFlags(d.ptr, C.uint(flags))
+ if result == -1 {
+ return 0, GetLastError()
+ }
+ return int32(result), nil
+}
+
+func (d *VirDomain) QemuMonitorCommand(flags uint32, command string) (string, error) {
+ var cResult *C.char
+ cCommand := C.CString(command)
+ defer C.free(unsafe.Pointer(cCommand))
+ result := C.virDomainQemuMonitorCommand(d.ptr, cCommand, &cResult, C.uint(flags))
+
+ if result != 0 {
+ return "", GetLastError()
+ }
+
+ rstring := C.GoString(cResult)
+ C.free(unsafe.Pointer(cResult))
+ return rstring, nil
+}
+
+type VirDomainIPAddress struct {
+ Type int
+ Addr string
+ Prefix uint
+}
+
+type VirDomainInterface struct {
+ Name string
+ Hwaddr string
+ Addrs []VirDomainIPAddress
+}
+
+func (d *VirDomain) ListAllInterfaceAddresses(src uint) ([]VirDomainInterface, error) {
+ var cList *C.virDomainInterfacePtr
+ numIfaces := int(C.virDomainInterfaceAddresses(d.ptr, (**C.virDomainInterfacePtr)(&cList), C.uint(src), 0))
+ if numIfaces == -1 {
+ return nil, GetLastError()
+ }
+
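+	// Reinterpret the C array as a Go slice via a manual slice header so
+	// the entries can be iterated without copying; each entry is freed
+	// with virDomainInterfaceFree once converted.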
+ hdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(cList)),
+ Len: int(numIfaces),
+ Cap: int(numIfaces),
+ }
+
+ ifaces := make([]VirDomainInterface, numIfaces)
+ ifaceSlice := *(*[]C.virDomainInterfacePtr)(unsafe.Pointer(&hdr))
+
+ for i := 0; i < numIfaces; i++ {
+ ifaces[i].Name = C.GoString(ifaceSlice[i].name)
+ ifaces[i].Hwaddr = C.GoString(ifaceSlice[i].hwaddr)
+
+ numAddr := int(ifaceSlice[i].naddrs)
+		// addrs points to a contiguous C array of virDomainIPAddress
+		// structs, so the slice header must wrap the pointer value
+		// itself, not the address of the pointer field.
+		addrHdr := reflect.SliceHeader{
+			Data: uintptr(unsafe.Pointer(ifaceSlice[i].addrs)),
+			Len:  numAddr,
+			Cap:  numAddr,
+		}
+
+		ifaces[i].Addrs = make([]VirDomainIPAddress, numAddr)
+		addrSlice := *(*[]C.virDomainIPAddress)(unsafe.Pointer(&addrHdr))
+
+		for k := 0; k < numAddr; k++ {
+			ifaces[i].Addrs[k] = VirDomainIPAddress{
+				Type:   int(addrSlice[k]._type),
+				Addr:   C.GoString(addrSlice[k].addr),
+				Prefix: uint(addrSlice[k].prefix),
+			}
+		}
+ C.virDomainInterfaceFree(ifaceSlice[i])
+ }
+ C.free(unsafe.Pointer(cList))
+ return ifaces, nil
+}
+
+func (d *VirDomain) QemuAgentCommand(cmd string, timeout int, flags uint32) string {
+	cCmd := C.CString(cmd)
+	defer C.free(unsafe.Pointer(cCmd))
+	result := C.virDomainQemuAgentCommand(d.ptr, cCmd, C.int(timeout), C.uint(flags))
+
+	// The returned C string must be freed by the caller; result is NULL
+	// on failure, and both C.GoString and C.free accept NULL safely.
+	reply := C.GoString(result)
+	C.free(unsafe.Pointer(result))
+	return reply
+}
diff --git a/vendor/github.com/dmacvicar/libvirt-go/error.go b/vendor/github.com/dmacvicar/libvirt-go/error.go
new file mode 100644
index 00000000..b5285b5c
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/error.go
@@ -0,0 +1,504 @@
+package libvirt
+
+/*
+#cgo LDFLAGS: -lvirt
+#include <libvirt/libvirt.h>
+#include <libvirt/virterror.h>
+#include <stdlib.h>
+
+#ifndef VIR_FROM_BHYVE
+#define VIR_FROM_BHYVE 57
+#endif
+
+#ifndef VIR_FROM_CRYPTO
+#define VIR_FROM_CRYPTO 58
+#endif
+
+#ifndef VIR_FROM_FIREWALL
+#define VIR_FROM_FIREWALL 59
+#endif
+
+*/
+import "C"
+
+import "fmt"
+
+// virErrorLevel
+const (
+ VIR_ERR_NONE = C.VIR_ERR_NONE
+ VIR_ERR_WARNING = C.VIR_ERR_WARNING
+ VIR_ERR_ERROR = C.VIR_ERR_ERROR
+)
+
+// virErrorNumber
+const (
+ VIR_ERR_OK = C.VIR_ERR_OK
+
+ // internal error
+ VIR_ERR_INTERNAL_ERROR = C.VIR_ERR_INTERNAL_ERROR
+
+ // memory allocation failure
+ VIR_ERR_NO_MEMORY = C.VIR_ERR_NO_MEMORY
+
+ // no support for this function
+ VIR_ERR_NO_SUPPORT = C.VIR_ERR_NO_SUPPORT
+
+ // could not resolve hostname
+ VIR_ERR_UNKNOWN_HOST = C.VIR_ERR_UNKNOWN_HOST
+
+ // can't connect to hypervisor
+ VIR_ERR_NO_CONNECT = C.VIR_ERR_NO_CONNECT
+
+ // invalid connection object
+ VIR_ERR_INVALID_CONN = C.VIR_ERR_INVALID_CONN
+
+ // invalid domain object
+ VIR_ERR_INVALID_DOMAIN = C.VIR_ERR_INVALID_DOMAIN
+
+ // invalid function argument
+ VIR_ERR_INVALID_ARG = C.VIR_ERR_INVALID_ARG
+
+ // a command to hypervisor failed
+ VIR_ERR_OPERATION_FAILED = C.VIR_ERR_OPERATION_FAILED
+
+	// an HTTP GET command failed
+ VIR_ERR_GET_FAILED = C.VIR_ERR_GET_FAILED
+
+	// an HTTP POST command failed
+ VIR_ERR_POST_FAILED = C.VIR_ERR_POST_FAILED
+
+ // unexpected HTTP error code
+ VIR_ERR_HTTP_ERROR = C.VIR_ERR_HTTP_ERROR
+
+ // failure to serialize an S-Expr
+ VIR_ERR_SEXPR_SERIAL = C.VIR_ERR_SEXPR_SERIAL
+
+ // could not open Xen hypervisor control
+ VIR_ERR_NO_XEN = C.VIR_ERR_NO_XEN
+
+	// failure doing a hypervisor call
+ VIR_ERR_XEN_CALL = C.VIR_ERR_XEN_CALL
+
+ // unknown OS type
+ VIR_ERR_OS_TYPE = C.VIR_ERR_OS_TYPE
+
+ // missing kernel information
+ VIR_ERR_NO_KERNEL = C.VIR_ERR_NO_KERNEL
+
+ // missing root device information
+ VIR_ERR_NO_ROOT = C.VIR_ERR_NO_ROOT
+
+ // missing source device information
+ VIR_ERR_NO_SOURCE = C.VIR_ERR_NO_SOURCE
+
+ // missing target device information
+ VIR_ERR_NO_TARGET = C.VIR_ERR_NO_TARGET
+
+ // missing domain name information
+ VIR_ERR_NO_NAME = C.VIR_ERR_NO_NAME
+
+ // missing domain OS information
+ VIR_ERR_NO_OS = C.VIR_ERR_NO_OS
+
+ // missing domain devices information
+ VIR_ERR_NO_DEVICE = C.VIR_ERR_NO_DEVICE
+
+ // could not open Xen Store control
+ VIR_ERR_NO_XENSTORE = C.VIR_ERR_NO_XENSTORE
+
+ // too many drivers registered
+ VIR_ERR_DRIVER_FULL = C.VIR_ERR_DRIVER_FULL
+
+ // not supported by the drivers (DEPRECATED)
+ VIR_ERR_CALL_FAILED = C.VIR_ERR_CALL_FAILED
+
+ // an XML description is not well formed or broken
+ VIR_ERR_XML_ERROR = C.VIR_ERR_XML_ERROR
+
+	// the domain already exists
+ VIR_ERR_DOM_EXIST = C.VIR_ERR_DOM_EXIST
+
+ // operation forbidden on read-only connections
+ VIR_ERR_OPERATION_DENIED = C.VIR_ERR_OPERATION_DENIED
+
+ // failed to open a conf file
+ VIR_ERR_OPEN_FAILED = C.VIR_ERR_OPEN_FAILED
+
+ // failed to read a conf file
+ VIR_ERR_READ_FAILED = C.VIR_ERR_READ_FAILED
+
+ // failed to parse a conf file
+ VIR_ERR_PARSE_FAILED = C.VIR_ERR_PARSE_FAILED
+
+ // failed to parse the syntax of a conf file
+ VIR_ERR_CONF_SYNTAX = C.VIR_ERR_CONF_SYNTAX
+
+ // failed to write a conf file
+ VIR_ERR_WRITE_FAILED = C.VIR_ERR_WRITE_FAILED
+
+ // detail of an XML error
+ VIR_ERR_XML_DETAIL = C.VIR_ERR_XML_DETAIL
+
+ // invalid network object
+ VIR_ERR_INVALID_NETWORK = C.VIR_ERR_INVALID_NETWORK
+
+	// the network already exists
+ VIR_ERR_NETWORK_EXIST = C.VIR_ERR_NETWORK_EXIST
+
+ // general system call failure
+ VIR_ERR_SYSTEM_ERROR = C.VIR_ERR_SYSTEM_ERROR
+
+ // some sort of RPC error
+ VIR_ERR_RPC = C.VIR_ERR_RPC
+
+ // error from a GNUTLS call
+ VIR_ERR_GNUTLS_ERROR = C.VIR_ERR_GNUTLS_ERROR
+
+ // failed to start network
+ VIR_WAR_NO_NETWORK = C.VIR_WAR_NO_NETWORK
+
+ // domain not found or unexpectedly disappeared
+ VIR_ERR_NO_DOMAIN = C.VIR_ERR_NO_DOMAIN
+
+ // network not found
+ VIR_ERR_NO_NETWORK = C.VIR_ERR_NO_NETWORK
+
+ // invalid MAC address
+ VIR_ERR_INVALID_MAC = C.VIR_ERR_INVALID_MAC
+
+ // authentication failed
+ VIR_ERR_AUTH_FAILED = C.VIR_ERR_AUTH_FAILED
+
+ // invalid storage pool object
+ VIR_ERR_INVALID_STORAGE_POOL = C.VIR_ERR_INVALID_STORAGE_POOL
+
+ // invalid storage vol object
+ VIR_ERR_INVALID_STORAGE_VOL = C.VIR_ERR_INVALID_STORAGE_VOL
+
+ // failed to start storage
+ VIR_WAR_NO_STORAGE = C.VIR_WAR_NO_STORAGE
+
+ // storage pool not found
+ VIR_ERR_NO_STORAGE_POOL = C.VIR_ERR_NO_STORAGE_POOL
+
+ // storage volume not found
+ VIR_ERR_NO_STORAGE_VOL = C.VIR_ERR_NO_STORAGE_VOL
+
+ // failed to start node driver
+ VIR_WAR_NO_NODE = C.VIR_WAR_NO_NODE
+
+ // invalid node device object
+ VIR_ERR_INVALID_NODE_DEVICE = C.VIR_ERR_INVALID_NODE_DEVICE
+
+ // node device not found
+ VIR_ERR_NO_NODE_DEVICE = C.VIR_ERR_NO_NODE_DEVICE
+
+ // security model not found
+ VIR_ERR_NO_SECURITY_MODEL = C.VIR_ERR_NO_SECURITY_MODEL
+
+ // operation is not applicable at this time
+ VIR_ERR_OPERATION_INVALID = C.VIR_ERR_OPERATION_INVALID
+
+ // failed to start interface driver
+ VIR_WAR_NO_INTERFACE = C.VIR_WAR_NO_INTERFACE
+
+ // interface driver not running
+ VIR_ERR_NO_INTERFACE = C.VIR_ERR_NO_INTERFACE
+
+ // invalid interface object
+ VIR_ERR_INVALID_INTERFACE = C.VIR_ERR_INVALID_INTERFACE
+
+ // more than one matching interface found
+ VIR_ERR_MULTIPLE_INTERFACES = C.VIR_ERR_MULTIPLE_INTERFACES
+
+ // failed to start nwfilter driver
+ VIR_WAR_NO_NWFILTER = C.VIR_WAR_NO_NWFILTER
+
+ // invalid nwfilter object
+ VIR_ERR_INVALID_NWFILTER = C.VIR_ERR_INVALID_NWFILTER
+
+ // nw filter pool not found
+ VIR_ERR_NO_NWFILTER = C.VIR_ERR_NO_NWFILTER
+
+	// failed to build firewall rules
+ VIR_ERR_BUILD_FIREWALL = C.VIR_ERR_BUILD_FIREWALL
+
+ // failed to start secret storage
+ VIR_WAR_NO_SECRET = C.VIR_WAR_NO_SECRET
+
+ // invalid secret
+ VIR_ERR_INVALID_SECRET = C.VIR_ERR_INVALID_SECRET
+
+ // secret not found
+ VIR_ERR_NO_SECRET = C.VIR_ERR_NO_SECRET
+
+ // unsupported configuration construct
+ VIR_ERR_CONFIG_UNSUPPORTED = C.VIR_ERR_CONFIG_UNSUPPORTED
+
+ // timeout occurred during operation
+ VIR_ERR_OPERATION_TIMEOUT = C.VIR_ERR_OPERATION_TIMEOUT
+
+ // a migration worked, but making the VM persist on the dest host failed
+ VIR_ERR_MIGRATE_PERSIST_FAILED = C.VIR_ERR_MIGRATE_PERSIST_FAILED
+
+ // a synchronous hook script failed
+ VIR_ERR_HOOK_SCRIPT_FAILED = C.VIR_ERR_HOOK_SCRIPT_FAILED
+
+ // invalid domain snapshot
+ VIR_ERR_INVALID_DOMAIN_SNAPSHOT = C.VIR_ERR_INVALID_DOMAIN_SNAPSHOT
+
+ // domain snapshot not found
+ VIR_ERR_NO_DOMAIN_SNAPSHOT = C.VIR_ERR_NO_DOMAIN_SNAPSHOT
+
+ // stream pointer not valid
+ VIR_ERR_INVALID_STREAM = C.VIR_ERR_INVALID_STREAM
+
+ // valid API use but unsupported by the given driver
+ VIR_ERR_ARGUMENT_UNSUPPORTED = C.VIR_ERR_ARGUMENT_UNSUPPORTED
+
+ // storage pool probe failed
+ VIR_ERR_STORAGE_PROBE_FAILED = C.VIR_ERR_STORAGE_PROBE_FAILED
+
+ // storage pool already built
+ VIR_ERR_STORAGE_POOL_BUILT = C.VIR_ERR_STORAGE_POOL_BUILT
+
+ // force was not requested for a risky domain snapshot revert
+ VIR_ERR_SNAPSHOT_REVERT_RISKY = C.VIR_ERR_SNAPSHOT_REVERT_RISKY
+
+ // operation on a domain was canceled/aborted by user
+ VIR_ERR_OPERATION_ABORTED = C.VIR_ERR_OPERATION_ABORTED
+
+ // authentication cancelled
+ VIR_ERR_AUTH_CANCELLED = C.VIR_ERR_AUTH_CANCELLED
+
+ // The metadata is not present
+ VIR_ERR_NO_DOMAIN_METADATA = C.VIR_ERR_NO_DOMAIN_METADATA
+
+ // Migration is not safe
+ VIR_ERR_MIGRATE_UNSAFE = C.VIR_ERR_MIGRATE_UNSAFE
+
+ // integer overflow
+ VIR_ERR_OVERFLOW = C.VIR_ERR_OVERFLOW
+
+ // action prevented by block copy job
+ VIR_ERR_BLOCK_COPY_ACTIVE = C.VIR_ERR_BLOCK_COPY_ACTIVE
+
+ // The requested operation is not supported
+ VIR_ERR_OPERATION_UNSUPPORTED = C.VIR_ERR_OPERATION_UNSUPPORTED
+
+ // error in ssh transport driver
+ VIR_ERR_SSH = C.VIR_ERR_SSH
+
+ // guest agent is unresponsive, not running or not usable
+ VIR_ERR_AGENT_UNRESPONSIVE = C.VIR_ERR_AGENT_UNRESPONSIVE
+
+ // resource is already in use
+ VIR_ERR_RESOURCE_BUSY = C.VIR_ERR_RESOURCE_BUSY
+
+ // operation on the object/resource was denied
+ VIR_ERR_ACCESS_DENIED = C.VIR_ERR_ACCESS_DENIED
+
+ // error from a dbus service
+ VIR_ERR_DBUS_SERVICE = C.VIR_ERR_DBUS_SERVICE
+
+ // the storage vol already exists
+ VIR_ERR_STORAGE_VOL_EXIST = C.VIR_ERR_STORAGE_VOL_EXIST
+
+ // given CPU is incompatible with host CPU
+ // added in libvirt 1.2.6
+ // VIR_ERR_CPU_INCOMPATIBLE = C.VIR_ERR_CPU_INCOMPATIBLE
+)
+
+// virErrorDomain
+const (
+ VIR_FROM_NONE = C.VIR_FROM_NONE
+
+ // Error at Xen hypervisor layer
+ VIR_FROM_XEN = C.VIR_FROM_XEN
+
+ // Error at connection with xend daemon
+ VIR_FROM_XEND = C.VIR_FROM_XEND
+
+ // Error at connection with xen store
+ VIR_FROM_XENSTORE = C.VIR_FROM_XENSTORE
+
+ // Error in the S-Expression code
+ VIR_FROM_SEXPR = C.VIR_FROM_SEXPR
+
+ // Error in the XML code
+ VIR_FROM_XML = C.VIR_FROM_XML
+
+ // Error when operating on a domain
+ VIR_FROM_DOM = C.VIR_FROM_DOM
+
+ // Error in the XML-RPC code
+ VIR_FROM_RPC = C.VIR_FROM_RPC
+
+ // Error in the proxy code; unused since 0.8.6
+ VIR_FROM_PROXY = C.VIR_FROM_PROXY
+
+ // Error in the configuration file handling
+ VIR_FROM_CONF = C.VIR_FROM_CONF
+
+ // Error at the QEMU daemon
+ VIR_FROM_QEMU = C.VIR_FROM_QEMU
+
+ // Error when operating on a network
+ VIR_FROM_NET = C.VIR_FROM_NET
+
+ // Error from test driver
+ VIR_FROM_TEST = C.VIR_FROM_TEST
+
+ // Error from remote driver
+ VIR_FROM_REMOTE = C.VIR_FROM_REMOTE
+
+ // Error from OpenVZ driver
+ VIR_FROM_OPENVZ = C.VIR_FROM_OPENVZ
+
+ // Error at Xen XM layer
+ VIR_FROM_XENXM = C.VIR_FROM_XENXM
+
+ // Error in the Linux Stats code
+ VIR_FROM_STATS_LINUX = C.VIR_FROM_STATS_LINUX
+
+ // Error from Linux Container driver
+ VIR_FROM_LXC = C.VIR_FROM_LXC
+
+ // Error from storage driver
+ VIR_FROM_STORAGE = C.VIR_FROM_STORAGE
+
+ // Error from network config
+ VIR_FROM_NETWORK = C.VIR_FROM_NETWORK
+
+ // Error from domain config
+ VIR_FROM_DOMAIN = C.VIR_FROM_DOMAIN
+
+ // Error at the UML driver
+ VIR_FROM_UML = C.VIR_FROM_UML
+
+ // Error from node device monitor
+ VIR_FROM_NODEDEV = C.VIR_FROM_NODEDEV
+
+ // Error from xen inotify layer
+ VIR_FROM_XEN_INOTIFY = C.VIR_FROM_XEN_INOTIFY
+
+ // Error from security framework
+ VIR_FROM_SECURITY = C.VIR_FROM_SECURITY
+
+ // Error from VirtualBox driver
+ VIR_FROM_VBOX = C.VIR_FROM_VBOX
+
+ // Error when operating on an interface
+ VIR_FROM_INTERFACE = C.VIR_FROM_INTERFACE
+
+ // The OpenNebula driver no longer exists. Retained for ABI/API compat only
+ VIR_FROM_ONE = C.VIR_FROM_ONE
+
+ // Error from ESX driver
+ VIR_FROM_ESX = C.VIR_FROM_ESX
+
+ // Error from IBM power hypervisor
+ VIR_FROM_PHYP = C.VIR_FROM_PHYP
+
+ // Error from secret storage
+ VIR_FROM_SECRET = C.VIR_FROM_SECRET
+
+ // Error from CPU driver
+ VIR_FROM_CPU = C.VIR_FROM_CPU
+
+ // Error from XenAPI
+ VIR_FROM_XENAPI = C.VIR_FROM_XENAPI
+
+ // Error from network filter driver
+ VIR_FROM_NWFILTER = C.VIR_FROM_NWFILTER
+
+ // Error from Synchronous hooks
+ VIR_FROM_HOOK = C.VIR_FROM_HOOK
+
+ // Error from domain snapshot
+ VIR_FROM_DOMAIN_SNAPSHOT = C.VIR_FROM_DOMAIN_SNAPSHOT
+
+ // Error from auditing subsystem
+ VIR_FROM_AUDIT = C.VIR_FROM_AUDIT
+
+ // Error from sysinfo/SMBIOS
+ VIR_FROM_SYSINFO = C.VIR_FROM_SYSINFO
+
+ // Error from I/O streams
+ VIR_FROM_STREAMS = C.VIR_FROM_STREAMS
+
+ // Error from VMware driver
+ VIR_FROM_VMWARE = C.VIR_FROM_VMWARE
+
+ // Error from event loop impl
+ VIR_FROM_EVENT = C.VIR_FROM_EVENT
+
+ // Error from libxenlight driver
+ VIR_FROM_LIBXL = C.VIR_FROM_LIBXL
+
+ // Error from lock manager
+ VIR_FROM_LOCKING = C.VIR_FROM_LOCKING
+
+ // Error from Hyper-V driver
+ VIR_FROM_HYPERV = C.VIR_FROM_HYPERV
+
+ // Error from capabilities
+ VIR_FROM_CAPABILITIES = C.VIR_FROM_CAPABILITIES
+
+ // Error from URI handling
+ VIR_FROM_URI = C.VIR_FROM_URI
+
+ // Error from auth handling
+ VIR_FROM_AUTH = C.VIR_FROM_AUTH
+
+ // Error from DBus
+ VIR_FROM_DBUS = C.VIR_FROM_DBUS
+
+ // Error from Parallels
+ VIR_FROM_PARALLELS = C.VIR_FROM_PARALLELS
+
+ // Error from Device
+ VIR_FROM_DEVICE = C.VIR_FROM_DEVICE
+
+ // Error from libssh2 connection transport
+ VIR_FROM_SSH = C.VIR_FROM_SSH
+
+ // Error from lockspace
+ VIR_FROM_LOCKSPACE = C.VIR_FROM_LOCKSPACE
+
+ // Error from initctl device communication
+ VIR_FROM_INITCTL = C.VIR_FROM_INITCTL
+
+ // Error from identity code
+ VIR_FROM_IDENTITY = C.VIR_FROM_IDENTITY
+
+ // Error from cgroups
+ VIR_FROM_CGROUP = C.VIR_FROM_CGROUP
+
+ // Error from access control manager
+ VIR_FROM_ACCESS = C.VIR_FROM_ACCESS
+
+ // Error from systemd code
+ VIR_FROM_SYSTEMD = C.VIR_FROM_SYSTEMD
+
+ // Error from bhyve driver
+ VIR_FROM_BHYVE = C.VIR_FROM_BHYVE
+
+ // Error from crypto code
+ VIR_FROM_CRYPTO = C.VIR_FROM_CRYPTO
+
+ // Error from firewall
+ VIR_FROM_FIREWALL = C.VIR_FROM_FIREWALL
+)
+
+type VirError struct {
+ Code int
+ Domain int
+ Message string
+ Level int
+}
+
+func (err VirError) Error() string {
+ return fmt.Sprintf("[Code-%d] [Domain-%d] %s",
+ err.Code, err.Domain, err.Message)
+}
diff --git a/vendor/github.com/dmacvicar/libvirt-go/events.go b/vendor/github.com/dmacvicar/libvirt-go/events.go
new file mode 100644
index 00000000..6f75eb88
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/events.go
@@ -0,0 +1,467 @@
+package libvirt
+
+import (
+ "sync"
+ "unsafe"
+)
+
+/*
+ * Go 1.6 does not allow C code to hold pointers to Go memory, so the
+ * domain event callbacks below never hand Go pointers to libvirt;
+ * instead they pass plain integer ids that are resolved through the
+ * goCallbacks registry on the Go side.
+ */
+
+/*
+#cgo LDFLAGS: -lvirt
+#include <libvirt/libvirt.h>
+
+int domainEventLifecycleCallback_cgo(virConnectPtr c, virDomainPtr d,
+ int event, int detail, void* data);
+
+int domainEventGenericCallback_cgo(virConnectPtr c, virDomainPtr d, void* data);
+
+int domainEventRTCChangeCallback_cgo(virConnectPtr c, virDomainPtr d,
+ long long utcoffset, void* data);
+
+int domainEventWatchdogCallback_cgo(virConnectPtr c, virDomainPtr d,
+ int action, void* data);
+
+int domainEventIOErrorCallback_cgo(virConnectPtr c, virDomainPtr d,
+ const char *srcPath, const char *devAlias,
+ int action, void* data);
+
+int domainEventGraphicsCallback_cgo(virConnectPtr c, virDomainPtr d,
+ int phase, const virDomainEventGraphicsAddress *local,
+ const virDomainEventGraphicsAddress *remote,
+ const char *authScheme,
+ const virDomainEventGraphicsSubject *subject, void* data);
+
+int domainEventIOErrorReasonCallback_cgo(virConnectPtr c, virDomainPtr d,
+ const char *srcPath, const char *devAlias,
+ int action, const char *reason, void* data);
+
+int domainEventBlockJobCallback_cgo(virConnectPtr c, virDomainPtr d,
+ const char *disk, int type, int status, void* data);
+
+int domainEventDiskChangeCallback_cgo(virConnectPtr c, virDomainPtr d,
+ const char *oldSrcPath, const char *newSrcPath,
+ const char *devAlias, int reason, void* data);
+
+int domainEventTrayChangeCallback_cgo(virConnectPtr c, virDomainPtr d,
+ const char *devAlias, int reason, void* data);
+
+int domainEventReasonCallback_cgo(virConnectPtr c, virDomainPtr d,
+ int reason, void* data);
+
+int domainEventBalloonChangeCallback_cgo(virConnectPtr c, virDomainPtr d,
+ unsigned long long actual, void* data);
+
+int domainEventDeviceRemovedCallback_cgo(virConnectPtr c, virDomainPtr d,
+ const char *devAlias, void* data);
+
+int virConnectDomainEventRegisterAny_cgo(virConnectPtr c, virDomainPtr d,
+ int eventID, virConnectDomainEventGenericCallback cb,
+ int goCallbackId);
+*/
+import "C"
+
+type DomainLifecycleEvent struct {
+ Event int
+ Detail int
+}
+
+type DomainRTCChangeEvent struct {
+ Utcoffset int64
+}
+
+type DomainWatchdogEvent struct {
+ Action int
+}
+
+type DomainIOErrorEvent struct {
+ SrcPath string
+ DevAlias string
+ Action int
+}
+
+type DomainEventGraphicsAddress struct {
+ Family int
+ Node string
+ Service string
+}
+
+type DomainEventGraphicsSubjectIdentity struct {
+ Type string
+ Name string
+}
+
+type DomainGraphicsEvent struct {
+ Phase int
+ Local DomainEventGraphicsAddress
+ Remote DomainEventGraphicsAddress
+ AuthScheme string
+ Subject []DomainEventGraphicsSubjectIdentity
+}
+
+type DomainIOErrorReasonEvent struct {
+ DomainIOErrorEvent
+ Reason string
+}
+
+type DomainBlockJobEvent struct {
+ Disk string
+ Type int
+ Status int
+}
+
+type DomainDiskChangeEvent struct {
+ OldSrcPath string
+ NewSrcPath string
+ DevAlias string
+ Reason int
+}
+
+type DomainTrayChangeEvent struct {
+ DevAlias string
+ Reason int
+}
+
+type DomainReasonEvent struct {
+ Reason int
+}
+
+type DomainBalloonChangeEvent struct {
+ Actual uint64
+}
+
+type DomainDeviceRemovedEvent struct {
+ DevAlias string
+}
+
+//export domainEventLifecycleCallback
+func domainEventLifecycleCallback(c C.virConnectPtr, d C.virDomainPtr,
+ event int, detail int,
+ opaque int) int {
+
+ domain := VirDomain{ptr: d}
+ connection := VirConnection{ptr: c}
+
+ eventDetails := DomainLifecycleEvent{
+ Event: event,
+ Detail: detail,
+ }
+
+ return callCallback(opaque, &connection, &domain, eventDetails)
+}
+
+//export domainEventGenericCallback
+func domainEventGenericCallback(c C.virConnectPtr, d C.virDomainPtr,
+ opaque int) int {
+
+ domain := VirDomain{ptr: d}
+ connection := VirConnection{ptr: c}
+
+ return callCallback(opaque, &connection, &domain, nil)
+}
+
+//export domainEventRTCChangeCallback
+func domainEventRTCChangeCallback(c C.virConnectPtr, d C.virDomainPtr,
+ utcoffset int64, opaque int) int {
+
+ domain := VirDomain{ptr: d}
+ connection := VirConnection{ptr: c}
+
+ eventDetails := DomainRTCChangeEvent{
+ Utcoffset: utcoffset,
+ }
+
+ return callCallback(opaque, &connection, &domain, eventDetails)
+}
+
+//export domainEventWatchdogCallback
+func domainEventWatchdogCallback(c C.virConnectPtr, d C.virDomainPtr,
+ action int, opaque int) int {
+
+ domain := VirDomain{ptr: d}
+ connection := VirConnection{ptr: c}
+
+ eventDetails := DomainWatchdogEvent{
+ Action: action,
+ }
+
+ return callCallback(opaque, &connection, &domain, eventDetails)
+}
+
+//export domainEventIOErrorCallback
+func domainEventIOErrorCallback(c C.virConnectPtr, d C.virDomainPtr,
+ srcPath string, devAlias string, action int, opaque int) int {
+
+ domain := VirDomain{ptr: d}
+ connection := VirConnection{ptr: c}
+
+ eventDetails := DomainIOErrorEvent{
+ SrcPath: srcPath,
+ DevAlias: devAlias,
+ Action: action,
+ }
+
+ return callCallback(opaque, &connection, &domain, eventDetails)
+}
+
+//export domainEventGraphicsCallback
+func domainEventGraphicsCallback(c C.virConnectPtr, d C.virDomainPtr,
+ phase int,
+ local C.virDomainEventGraphicsAddressPtr,
+ remote C.virDomainEventGraphicsAddressPtr,
+ authScheme string,
+ subject C.virDomainEventGraphicsSubjectPtr,
+ opaque int) int {
+
+ domain := VirDomain{ptr: d}
+ connection := VirConnection{ptr: c}
+
+	nidentities := int(subject.nidentity)
+	subjectGo := make([]DomainEventGraphicsSubjectIdentity, 0, nidentities)
+	// identities points to a C array of identity structs; index through
+	// the pointer value rather than the address of the pointer field.
+	identities := (*[1 << 30]C.virDomainEventGraphicsSubjectIdentity)(unsafe.Pointer(subject.identities))[:nidentities:nidentities]
+ for _, identity := range identities {
+ subjectGo = append(subjectGo,
+ DomainEventGraphicsSubjectIdentity{
+ Type: C.GoString(identity._type),
+ Name: C.GoString(identity.name),
+ },
+ )
+ }
+
+ eventDetails := DomainGraphicsEvent{
+ Phase: phase,
+ Local: DomainEventGraphicsAddress{
+ Family: int(local.family),
+ Node: C.GoString(local.node),
+ Service: C.GoString(local.service),
+ },
+ Remote: DomainEventGraphicsAddress{
+ Family: int(remote.family),
+ Node: C.GoString(remote.node),
+ Service: C.GoString(remote.service),
+ },
+ AuthScheme: authScheme,
+ Subject: subjectGo,
+ }
+
+ return callCallback(opaque, &connection, &domain, eventDetails)
+}
+
+//export domainEventIOErrorReasonCallback
+func domainEventIOErrorReasonCallback(c C.virConnectPtr, d C.virDomainPtr,
+ srcPath string, devAlias string, action int, reason string,
+ opaque int) int {
+
+ domain := VirDomain{ptr: d}
+ connection := VirConnection{ptr: c}
+
+ eventDetails := DomainIOErrorReasonEvent{
+ DomainIOErrorEvent: DomainIOErrorEvent{
+ SrcPath: srcPath,
+ DevAlias: devAlias,
+ Action: action,
+ },
+ Reason: reason,
+ }
+
+ return callCallback(opaque, &connection, &domain, eventDetails)
+}
+
+//export domainEventBlockJobCallback
+func domainEventBlockJobCallback(c C.virConnectPtr, d C.virDomainPtr,
+ disk string, _type int, status int, opaque int) int {
+
+ domain := VirDomain{ptr: d}
+ connection := VirConnection{ptr: c}
+
+ eventDetails := DomainBlockJobEvent{
+ Disk: disk,
+ Type: _type,
+ Status: status,
+ }
+
+ return callCallback(opaque, &connection, &domain, eventDetails)
+}
+
+//export domainEventDiskChangeCallback
+func domainEventDiskChangeCallback(c C.virConnectPtr, d C.virDomainPtr,
+ oldSrcPath string, newSrcPath string, devAlias string,
+ reason int, opaque int) int {
+
+ domain := VirDomain{ptr: d}
+ connection := VirConnection{ptr: c}
+
+ eventDetails := DomainDiskChangeEvent{
+ OldSrcPath: oldSrcPath,
+ NewSrcPath: newSrcPath,
+ DevAlias: devAlias,
+ Reason: reason,
+ }
+
+ return callCallback(opaque, &connection, &domain, eventDetails)
+}
+
+//export domainEventTrayChangeCallback
+func domainEventTrayChangeCallback(c C.virConnectPtr, d C.virDomainPtr,
+ devAlias string, reason int, opaque int) int {
+
+ domain := VirDomain{ptr: d}
+ connection := VirConnection{ptr: c}
+
+ eventDetails := DomainTrayChangeEvent{
+ DevAlias: devAlias,
+ Reason: reason,
+ }
+
+ return callCallback(opaque, &connection, &domain, eventDetails)
+}
+
+//export domainEventReasonCallback
+func domainEventReasonCallback(c C.virConnectPtr, d C.virDomainPtr,
+ reason int, opaque int) int {
+
+ domain := VirDomain{ptr: d}
+ connection := VirConnection{ptr: c}
+
+ eventDetails := DomainReasonEvent{
+ Reason: reason,
+ }
+
+ return callCallback(opaque, &connection, &domain, eventDetails)
+}
+
+//export domainEventBalloonChangeCallback
+func domainEventBalloonChangeCallback(c C.virConnectPtr, d C.virDomainPtr,
+ actual uint64, opaque int) int {
+
+ domain := VirDomain{ptr: d}
+ connection := VirConnection{ptr: c}
+
+ eventDetails := DomainBalloonChangeEvent{
+ Actual: actual,
+ }
+
+ return callCallback(opaque, &connection, &domain, eventDetails)
+}
+
+//export domainEventDeviceRemovedCallback
+func domainEventDeviceRemovedCallback(c C.virConnectPtr, d C.virDomainPtr,
+ devAlias string, opaque int) int {
+
+ domain := VirDomain{ptr: d}
+ connection := VirConnection{ptr: c}
+
+ eventDetails := DomainDeviceRemovedEvent{
+ DevAlias: devAlias,
+ }
+ return callCallback(opaque, &connection, &domain, eventDetails)
+}
+
+type DomainEventCallback func(c *VirConnection, d *VirDomain,
+ event interface{}, f func()) int
+
+type domainCallbackContext struct {
+ cb *DomainEventCallback
+ f func()
+}
+
+var goCallbackLock sync.RWMutex
+var goCallbacks = make(map[int]*domainCallbackContext)
+var nextGoCallbackId int
+
+//export freeCallbackId
+func freeCallbackId(goCallbackId int) {
+ goCallbackLock.Lock()
+ delete(goCallbacks, goCallbackId)
+ goCallbackLock.Unlock()
+}
+
+func callCallback(goCallbackId int, c *VirConnection, d *VirDomain,
+ event interface{}) int {
+ goCallbackLock.RLock()
+ ctx := goCallbacks[goCallbackId]
+ goCallbackLock.RUnlock()
+ if ctx == nil {
+ // If this happens there must be a bug in libvirt
+ panic("Callback arrived after freeCallbackId was called")
+ }
+ return (*ctx.cb)(c, d, event, ctx.f)
+}
+
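+// DomainEventRegister registers callback for the given eventId on dom and
+// returns a callback id for DomainEventDeregister, or -1 on failure. A
+// minimal usage sketch, assuming an established connection conn and a
+// domain dom (the event loop must be registered and pumped by the caller):
+//
+//	EventRegisterDefaultImpl()
+//	cb := DomainEventCallback(func(c *VirConnection, d *VirDomain, ev interface{}, f func()) int {
+//		if lc, ok := ev.(DomainLifecycleEvent); ok {
+//			fmt.Println("lifecycle event:", lc.Event)
+//		}
+//		return 0
+//	})
+//	id := conn.DomainEventRegister(dom, VIR_DOMAIN_EVENT_ID_LIFECYCLE, &cb, nil)
+//	for {
+//		EventRunDefaultImpl()
+//	}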
+func (c *VirConnection) DomainEventRegister(dom VirDomain,
+ eventId int,
+ callback *DomainEventCallback,
+ opaque func()) int {
+ var callbackPtr unsafe.Pointer
+ context := &domainCallbackContext{
+ cb: callback,
+ f: opaque,
+ }
+ goCallbackLock.Lock()
+ goCallBackId := nextGoCallbackId
+ nextGoCallbackId++
+ goCallbacks[goCallBackId] = context
+ goCallbackLock.Unlock()
+
+ switch eventId {
+ case VIR_DOMAIN_EVENT_ID_LIFECYCLE:
+ callbackPtr = unsafe.Pointer(C.domainEventLifecycleCallback_cgo)
+	case VIR_DOMAIN_EVENT_ID_REBOOT, VIR_DOMAIN_EVENT_ID_CONTROL_ERROR:
+		callbackPtr = unsafe.Pointer(C.domainEventGenericCallback_cgo)
+ case VIR_DOMAIN_EVENT_ID_RTC_CHANGE:
+ callbackPtr = unsafe.Pointer(C.domainEventRTCChangeCallback_cgo)
+ case VIR_DOMAIN_EVENT_ID_WATCHDOG:
+ callbackPtr = unsafe.Pointer(C.domainEventWatchdogCallback_cgo)
+ case VIR_DOMAIN_EVENT_ID_IO_ERROR:
+ callbackPtr = unsafe.Pointer(C.domainEventIOErrorCallback_cgo)
+ case VIR_DOMAIN_EVENT_ID_GRAPHICS:
+ callbackPtr = unsafe.Pointer(C.domainEventGraphicsCallback_cgo)
+ case VIR_DOMAIN_EVENT_ID_IO_ERROR_REASON:
+ callbackPtr = unsafe.Pointer(C.domainEventIOErrorReasonCallback_cgo)
+ case VIR_DOMAIN_EVENT_ID_BLOCK_JOB:
+ // TODO Post 1.2.4, uncomment later
+ // case VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2:
+ callbackPtr = unsafe.Pointer(C.domainEventBlockJobCallback_cgo)
+ case VIR_DOMAIN_EVENT_ID_DISK_CHANGE:
+ callbackPtr = unsafe.Pointer(C.domainEventDiskChangeCallback_cgo)
+ case VIR_DOMAIN_EVENT_ID_TRAY_CHANGE:
+ callbackPtr = unsafe.Pointer(C.domainEventTrayChangeCallback_cgo)
+	case VIR_DOMAIN_EVENT_ID_PMWAKEUP, VIR_DOMAIN_EVENT_ID_PMSUSPEND,
+		VIR_DOMAIN_EVENT_ID_PMSUSPEND_DISK:
+		callbackPtr = unsafe.Pointer(C.domainEventReasonCallback_cgo)
+ case VIR_DOMAIN_EVENT_ID_BALLOON_CHANGE:
+ callbackPtr = unsafe.Pointer(C.domainEventBalloonChangeCallback_cgo)
+ case VIR_DOMAIN_EVENT_ID_DEVICE_REMOVED:
+ callbackPtr = unsafe.Pointer(C.domainEventDeviceRemovedCallback_cgo)
+	default:
+		// Unknown event id: drop the registered context and fail early
+		// instead of handing libvirt a nil callback.
+		goCallbackLock.Lock()
+		delete(goCallbacks, goCallBackId)
+		goCallbackLock.Unlock()
+		return -1
+	}
+	ret := C.virConnectDomainEventRegisterAny_cgo(c.ptr, dom.ptr, C.int(eventId),
+ C.virConnectDomainEventGenericCallback(callbackPtr),
+ C.int(goCallBackId))
+ if ret == -1 {
+ goCallbackLock.Lock()
+ delete(goCallbacks, goCallBackId)
+ goCallbackLock.Unlock()
+ return -1
+ }
+ return goCallBackId
+}
+
+func (c *VirConnection) DomainEventDeregister(callbackId int) int {
+ // Deregister the callback
+ return int(C.virConnectDomainEventDeregisterAny(c.ptr, C.int(callbackId)))
+}
+
+func EventRegisterDefaultImpl() int {
+ return int(C.virEventRegisterDefaultImpl())
+}
+
+func EventRunDefaultImpl() int {
+ return int(C.virEventRunDefaultImpl())
+}
diff --git a/vendor/github.com/dmacvicar/libvirt-go/interface.go b/vendor/github.com/dmacvicar/libvirt-go/interface.go
new file mode 100644
index 00000000..60f0c06b
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/interface.go
@@ -0,0 +1,87 @@
+package libvirt
+
+/*
+#cgo LDFLAGS: -lvirt
+#include <libvirt/libvirt.h>
+#include <libvirt/virterror.h>
+#include <stdlib.h>
+*/
+import "C"
+
+import (
+ "unsafe"
+)
+
+type VirInterface struct {
+ ptr C.virInterfacePtr
+}
+
+func (n *VirInterface) Create(flags uint32) error {
+ result := C.virInterfaceCreate(n.ptr, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (n *VirInterface) Destroy(flags uint32) error {
+ result := C.virInterfaceDestroy(n.ptr, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (n *VirInterface) IsActive() (bool, error) {
+ result := C.virInterfaceIsActive(n.ptr)
+ if result == -1 {
+ return false, GetLastError()
+ }
+ if result == 1 {
+ return true, nil
+ }
+ return false, nil
+}
+
+func (n *VirInterface) GetMACString() (string, error) {
+ result := C.virInterfaceGetMACString(n.ptr)
+ if result == nil {
+ return "", GetLastError()
+ }
+ mac := C.GoString(result)
+ return mac, nil
+}
+
+func (n *VirInterface) GetName() (string, error) {
+ result := C.virInterfaceGetName(n.ptr)
+ if result == nil {
+ return "", GetLastError()
+ }
+ name := C.GoString(result)
+ return name, nil
+}
+
+func (n *VirInterface) GetXMLDesc(flags uint32) (string, error) {
+ result := C.virInterfaceGetXMLDesc(n.ptr, C.uint(flags))
+ if result == nil {
+ return "", GetLastError()
+ }
+ xml := C.GoString(result)
+ C.free(unsafe.Pointer(result))
+ return xml, nil
+}
+
+func (n *VirInterface) Undefine() error {
+ result := C.virInterfaceUndefine(n.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (n *VirInterface) Free() error {
+ if result := C.virInterfaceFree(n.ptr); result != 0 {
+ return GetLastError()
+ }
+ return nil
+}
diff --git a/vendor/github.com/dmacvicar/libvirt-go/libvirt.go b/vendor/github.com/dmacvicar/libvirt-go/libvirt.go
new file mode 100644
index 00000000..2248ecd2
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/libvirt.go
@@ -0,0 +1,811 @@
+package libvirt
+
+import (
+ "encoding/base64"
+ "io/ioutil"
+ "reflect"
+ "unsafe"
+)
+
+/*
+#cgo LDFLAGS: -lvirt
+#include <libvirt/libvirt.h>
+#include <libvirt/virterror.h>
+#include <stdlib.h>
+
+void virErrorFuncDummy(void *userData, virErrorPtr error);
+
+void virErrorFuncDummy(void *userData, virErrorPtr error)
+{
+}
+
+*/
+import "C"
+
+func init() {
+	// Install a no-op error handler so libvirt does not print errors to stderr.
+ C.virSetErrorFunc(nil, C.virErrorFunc(unsafe.Pointer(C.virErrorFuncDummy)))
+}
+
+type VirConnection struct {
+ ptr C.virConnectPtr
+}
+
+func NewVirConnection(uri string) (VirConnection, error) {
+ var cUri *C.char
+ if uri != "" {
+ cUri = C.CString(uri)
+ defer C.free(unsafe.Pointer(cUri))
+ }
+ ptr := C.virConnectOpen(cUri)
+ if ptr == nil {
+ return VirConnection{}, GetLastError()
+ }
+ obj := VirConnection{ptr: ptr}
+ return obj, nil
+}
+
+func NewVirConnectionReadOnly(uri string) (VirConnection, error) {
+ var cUri *C.char
+ if uri != "" {
+ cUri = C.CString(uri)
+ defer C.free(unsafe.Pointer(cUri))
+ }
+ ptr := C.virConnectOpenReadOnly(cUri)
+ if ptr == nil {
+ return VirConnection{}, GetLastError()
+ }
+ obj := VirConnection{ptr: ptr}
+ return obj, nil
+}
+
+func GetLastError() VirError {
+	var virErr VirError
+	err := C.virGetLastError()
+	// virGetLastError returns NULL when no error has been raised yet.
+	if err == nil {
+		virErr.Code = VIR_ERR_OK
+		virErr.Domain = VIR_FROM_NONE
+		virErr.Level = VIR_ERR_NONE
+		virErr.Message = "no error reported by libvirt"
+		return virErr
+	}
+
+	virErr.Code = int(err.code)
+	virErr.Domain = int(err.domain)
+	virErr.Message = C.GoString(err.message)
+	virErr.Level = int(err.level)
+
+	C.virResetError(err)
+	return virErr
+}
+
+func (c *VirConnection) CloseConnection() (int, error) {
+ result := int(C.virConnectClose(c.ptr))
+ if result == -1 {
+ return result, GetLastError()
+ }
+ return result, nil
+}
+
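+// UnrefAndCloseConnection drops every reference this process holds on the
+// connection by calling CloseConnection until its reference count reaches
+// zero.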
+func (c *VirConnection) UnrefAndCloseConnection() error {
+ closeRes := 1
+ var err error
+ for closeRes > 0 {
+ closeRes, err = c.CloseConnection()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *VirConnection) GetCapabilities() (string, error) {
+ str := C.virConnectGetCapabilities(c.ptr)
+ if str == nil {
+ return "", GetLastError()
+ }
+ capabilities := C.GoString(str)
+ C.free(unsafe.Pointer(str))
+ return capabilities, nil
+}
+
+func (c *VirConnection) GetNodeInfo() (VirNodeInfo, error) {
+ ni := VirNodeInfo{}
+ var ptr C.virNodeInfo
+ result := C.virNodeGetInfo(c.ptr, (*C.virNodeInfo)(unsafe.Pointer(&ptr)))
+ if result == -1 {
+ return ni, GetLastError()
+ }
+ ni.ptr = ptr
+ return ni, nil
+}
+
+func (c *VirConnection) GetHostname() (string, error) {
+ str := C.virConnectGetHostname(c.ptr)
+ if str == nil {
+ return "", GetLastError()
+ }
+ hostname := C.GoString(str)
+ C.free(unsafe.Pointer(str))
+ return hostname, nil
+}
+
+func (c *VirConnection) GetLibVersion() (uint32, error) {
+ var version C.ulong
+ if err := C.virConnectGetLibVersion(c.ptr, &version); err < 0 {
+ return 0, GetLastError()
+ }
+ return uint32(version), nil
+}
+
+func (c *VirConnection) GetType() (string, error) {
+ str := C.virConnectGetType(c.ptr)
+ if str == nil {
+ return "", GetLastError()
+ }
+ hypDriver := C.GoString(str)
+ return hypDriver, nil
+}
+
+func (c *VirConnection) IsAlive() (bool, error) {
+ result := C.virConnectIsAlive(c.ptr)
+ if result == -1 {
+ return false, GetLastError()
+ }
+ if result == 1 {
+ return true, nil
+ }
+ return false, nil
+}
+
+func (c *VirConnection) IsEncrypted() (bool, error) {
+ result := C.virConnectIsEncrypted(c.ptr)
+ if result == -1 {
+ return false, GetLastError()
+ }
+ if result == 1 {
+ return true, nil
+ }
+ return false, nil
+}
+
+func (c *VirConnection) IsSecure() (bool, error) {
+ result := C.virConnectIsSecure(c.ptr)
+ if result == -1 {
+ return false, GetLastError()
+ }
+ if result == 1 {
+ return true, nil
+ }
+ return false, nil
+}
+
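+// ListDefinedDomains returns the names of defined (inactive) domains,
+// up to a fixed limit of 1024 entries.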
+func (c *VirConnection) ListDefinedDomains() ([]string, error) {
+ var names [1024](*C.char)
+ namesPtr := unsafe.Pointer(&names)
+ numDomains := C.virConnectListDefinedDomains(
+ c.ptr,
+ (**C.char)(namesPtr),
+ 1024)
+ if numDomains == -1 {
+ return nil, GetLastError()
+ }
+ goNames := make([]string, numDomains)
+ for k := 0; k < int(numDomains); k++ {
+ goNames[k] = C.GoString(names[k])
+ C.free(unsafe.Pointer(names[k]))
+ }
+ return goNames, nil
+}
+
+func (c *VirConnection) ListDomains() ([]uint32, error) {
+ var cDomainsIds [512](uint32)
+ cDomainsPointer := unsafe.Pointer(&cDomainsIds)
+ numDomains := C.virConnectListDomains(c.ptr, (*C.int)(cDomainsPointer), 512)
+ if numDomains == -1 {
+ return nil, GetLastError()
+ }
+
+ return cDomainsIds[:numDomains], nil
+}
+
+func (c *VirConnection) ListInterfaces() ([]string, error) {
+ const maxIfaces = 1024
+ var names [maxIfaces](*C.char)
+ namesPtr := unsafe.Pointer(&names)
+ numIfaces := C.virConnectListInterfaces(
+ c.ptr,
+ (**C.char)(namesPtr),
+ maxIfaces)
+ if numIfaces == -1 {
+ return nil, GetLastError()
+ }
+ goNames := make([]string, numIfaces)
+ for k := 0; k < int(numIfaces); k++ {
+ goNames[k] = C.GoString(names[k])
+ C.free(unsafe.Pointer(names[k]))
+ }
+ return goNames, nil
+}
+
+func (c *VirConnection) ListNetworks() ([]string, error) {
+ const maxNets = 1024
+ var names [maxNets](*C.char)
+ namesPtr := unsafe.Pointer(&names)
+ numNetworks := C.virConnectListNetworks(
+ c.ptr,
+ (**C.char)(namesPtr),
+ maxNets)
+ if numNetworks == -1 {
+ return nil, GetLastError()
+ }
+ goNames := make([]string, numNetworks)
+ for k := 0; k < int(numNetworks); k++ {
+ goNames[k] = C.GoString(names[k])
+ C.free(unsafe.Pointer(names[k]))
+ }
+ return goNames, nil
+}
+
+func (c *VirConnection) ListStoragePools() ([]string, error) {
+ const maxPools = 1024
+ var names [maxPools](*C.char)
+ namesPtr := unsafe.Pointer(&names)
+ numStoragePools := C.virConnectListStoragePools(
+ c.ptr,
+ (**C.char)(namesPtr),
+ maxPools)
+ if numStoragePools == -1 {
+ return nil, GetLastError()
+ }
+ goNames := make([]string, numStoragePools)
+ for k := 0; k < int(numStoragePools); k++ {
+ goNames[k] = C.GoString(names[k])
+ C.free(unsafe.Pointer(names[k]))
+ }
+ return goNames, nil
+}
+
+func (c *VirConnection) LookupDomainById(id uint32) (VirDomain, error) {
+ ptr := C.virDomainLookupByID(c.ptr, C.int(id))
+ if ptr == nil {
+ return VirDomain{}, GetLastError()
+ }
+ return VirDomain{ptr: ptr}, nil
+}
+
+func (c *VirConnection) LookupDomainByName(id string) (VirDomain, error) {
+ cName := C.CString(id)
+ defer C.free(unsafe.Pointer(cName))
+ ptr := C.virDomainLookupByName(c.ptr, cName)
+ if ptr == nil {
+ return VirDomain{}, GetLastError()
+ }
+ return VirDomain{ptr: ptr}, nil
+}
+
+func (c *VirConnection) LookupByUUIDString(uuid string) (VirDomain, error) {
+ cUuid := C.CString(uuid)
+ defer C.free(unsafe.Pointer(cUuid))
+ ptr := C.virDomainLookupByUUIDString(c.ptr, cUuid)
+ if ptr == nil {
+ return VirDomain{}, GetLastError()
+ }
+ return VirDomain{ptr: ptr}, nil
+}
+
+func (c *VirConnection) DomainCreateXMLFromFile(xmlFile string, flags uint32) (VirDomain, error) {
+ xmlConfig, err := ioutil.ReadFile(xmlFile)
+ if err != nil {
+ return VirDomain{}, err
+ }
+ return c.DomainCreateXML(string(xmlConfig), flags)
+}
+
+func (c *VirConnection) DomainCreateXML(xmlConfig string, flags uint32) (VirDomain, error) {
+ cXml := C.CString(string(xmlConfig))
+ defer C.free(unsafe.Pointer(cXml))
+ ptr := C.virDomainCreateXML(c.ptr, cXml, C.uint(flags))
+ if ptr == nil {
+ return VirDomain{}, GetLastError()
+ }
+ return VirDomain{ptr: ptr}, nil
+}
+
+func (c *VirConnection) DomainDefineXMLFromFile(xmlFile string) (VirDomain, error) {
+ xmlConfig, err := ioutil.ReadFile(xmlFile)
+ if err != nil {
+ return VirDomain{}, err
+ }
+ return c.DomainDefineXML(string(xmlConfig))
+}
+
+func (c *VirConnection) DomainDefineXML(xmlConfig string) (VirDomain, error) {
+ cXml := C.CString(string(xmlConfig))
+ defer C.free(unsafe.Pointer(cXml))
+ ptr := C.virDomainDefineXML(c.ptr, cXml)
+ if ptr == nil {
+ return VirDomain{}, GetLastError()
+ }
+ return VirDomain{ptr: ptr}, nil
+}
+
+func (c *VirConnection) ListDefinedInterfaces() ([]string, error) {
+ const maxIfaces = 1024
+ var names [maxIfaces](*C.char)
+ namesPtr := unsafe.Pointer(&names)
+ numIfaces := C.virConnectListDefinedInterfaces(
+ c.ptr,
+ (**C.char)(namesPtr),
+ maxIfaces)
+ if numIfaces == -1 {
+ return nil, GetLastError()
+ }
+ goNames := make([]string, numIfaces)
+ for k := 0; k < int(numIfaces); k++ {
+ goNames[k] = C.GoString(names[k])
+ C.free(unsafe.Pointer(names[k]))
+ }
+ return goNames, nil
+}
+
+func (c *VirConnection) ListDefinedNetworks() ([]string, error) {
+ const maxNets = 1024
+ var names [maxNets](*C.char)
+ namesPtr := unsafe.Pointer(&names)
+ numNetworks := C.virConnectListDefinedNetworks(
+ c.ptr,
+ (**C.char)(namesPtr),
+ maxNets)
+ if numNetworks == -1 {
+ return nil, GetLastError()
+ }
+ goNames := make([]string, numNetworks)
+ for k := 0; k < int(numNetworks); k++ {
+ goNames[k] = C.GoString(names[k])
+ C.free(unsafe.Pointer(names[k]))
+ }
+ return goNames, nil
+}
+
+func (c *VirConnection) ListDefinedStoragePools() ([]string, error) {
+ const maxPools = 1024
+ var names [maxPools](*C.char)
+ namesPtr := unsafe.Pointer(&names)
+ numStoragePools := C.virConnectListDefinedStoragePools(
+ c.ptr,
+ (**C.char)(namesPtr),
+ maxPools)
+ if numStoragePools == -1 {
+ return nil, GetLastError()
+ }
+ goNames := make([]string, numStoragePools)
+ for k := 0; k < int(numStoragePools); k++ {
+ goNames[k] = C.GoString(names[k])
+ C.free(unsafe.Pointer(names[k]))
+ }
+ return goNames, nil
+}
+
+func (c *VirConnection) NumOfDefinedInterfaces() (int, error) {
+ result := int(C.virConnectNumOfDefinedInterfaces(c.ptr))
+ if result == -1 {
+ return 0, GetLastError()
+ }
+ return result, nil
+}
+
+func (c *VirConnection) NumOfDefinedNetworks() (int, error) {
+ result := int(C.virConnectNumOfDefinedNetworks(c.ptr))
+ if result == -1 {
+ return 0, GetLastError()
+ }
+ return result, nil
+}
+
+func (c *VirConnection) NumOfDefinedStoragePools() (int, error) {
+ result := int(C.virConnectNumOfDefinedStoragePools(c.ptr))
+ if result == -1 {
+ return 0, GetLastError()
+ }
+ return result, nil
+}
+
+func (c *VirConnection) NumOfDomains() (int, error) {
+ result := int(C.virConnectNumOfDomains(c.ptr))
+ if result == -1 {
+ return 0, GetLastError()
+ }
+ return result, nil
+}
+
+func (c *VirConnection) NumOfInterfaces() (int, error) {
+ result := int(C.virConnectNumOfInterfaces(c.ptr))
+ if result == -1 {
+ return 0, GetLastError()
+ }
+ return result, nil
+}
+
+func (c *VirConnection) NumOfNetworks() (int, error) {
+ result := int(C.virConnectNumOfNetworks(c.ptr))
+ if result == -1 {
+ return 0, GetLastError()
+ }
+ return result, nil
+}
+
+func (c *VirConnection) NumOfNWFilters() (int, error) {
+ result := int(C.virConnectNumOfNWFilters(c.ptr))
+ if result == -1 {
+ return 0, GetLastError()
+ }
+ return result, nil
+}
+
+func (c *VirConnection) NumOfSecrets() (int, error) {
+ result := int(C.virConnectNumOfSecrets(c.ptr))
+ if result == -1 {
+ return 0, GetLastError()
+ }
+ return result, nil
+}
+
+func (c *VirConnection) NetworkDefineXMLFromFile(xmlFile string) (VirNetwork, error) {
+ xmlConfig, err := ioutil.ReadFile(xmlFile)
+ if err != nil {
+ return VirNetwork{}, err
+ }
+ return c.NetworkDefineXML(string(xmlConfig))
+}
+
+func (c *VirConnection) NetworkDefineXML(xmlConfig string) (VirNetwork, error) {
+ cXml := C.CString(string(xmlConfig))
+ defer C.free(unsafe.Pointer(cXml))
+ ptr := C.virNetworkDefineXML(c.ptr, cXml)
+ if ptr == nil {
+ return VirNetwork{}, GetLastError()
+ }
+ return VirNetwork{ptr: ptr}, nil
+}
+
+func (c *VirConnection) LookupNetworkByName(name string) (VirNetwork, error) {
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+ ptr := C.virNetworkLookupByName(c.ptr, cName)
+ if ptr == nil {
+ return VirNetwork{}, GetLastError()
+ }
+ return VirNetwork{ptr: ptr}, nil
+}
+
+func (c *VirConnection) LookupNetworkByUUIDString(uuid string) (VirNetwork, error) {
+ cUuid := C.CString(uuid)
+ defer C.free(unsafe.Pointer(cUuid))
+ ptr := C.virNetworkLookupByUUIDString(c.ptr, cUuid)
+ if ptr == nil {
+ return VirNetwork{}, GetLastError()
+ }
+ return VirNetwork{ptr: ptr}, nil
+}
+
+func (c *VirConnection) GetSysinfo(flags uint) (string, error) {
+ cStr := C.virConnectGetSysinfo(c.ptr, C.uint(flags))
+ if cStr == nil {
+ return "", GetLastError()
+ }
+ info := C.GoString(cStr)
+ C.free(unsafe.Pointer(cStr))
+ return info, nil
+}
+
+func (c *VirConnection) GetURI() (string, error) {
+ cStr := C.virConnectGetURI(c.ptr)
+ if cStr == nil {
+ return "", GetLastError()
+ }
+ uri := C.GoString(cStr)
+ C.free(unsafe.Pointer(cStr))
+ return uri, nil
+}
+
+func (c *VirConnection) GetMaxVcpus(typeAttr string) (int, error) {
+ var cTypeAttr *C.char
+ if typeAttr != "" {
+ cTypeAttr = C.CString(typeAttr)
+ defer C.free(unsafe.Pointer(cTypeAttr))
+ }
+ result := int(C.virConnectGetMaxVcpus(c.ptr, cTypeAttr))
+ if result == -1 {
+ return 0, GetLastError()
+ }
+ return result, nil
+}
+
+func (c *VirConnection) InterfaceDefineXMLFromFile(xmlFile string) (VirInterface, error) {
+ xmlConfig, err := ioutil.ReadFile(xmlFile)
+ if err != nil {
+ return VirInterface{}, err
+ }
+ return c.InterfaceDefineXML(string(xmlConfig), 0)
+}
+
+func (c *VirConnection) InterfaceDefineXML(xmlConfig string, flags uint32) (VirInterface, error) {
+ cXml := C.CString(string(xmlConfig))
+ defer C.free(unsafe.Pointer(cXml))
+ ptr := C.virInterfaceDefineXML(c.ptr, cXml, C.uint(flags))
+ if ptr == nil {
+ return VirInterface{}, GetLastError()
+ }
+ return VirInterface{ptr: ptr}, nil
+}
+
+func (c *VirConnection) LookupInterfaceByName(name string) (VirInterface, error) {
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+ ptr := C.virInterfaceLookupByName(c.ptr, cName)
+ if ptr == nil {
+ return VirInterface{}, GetLastError()
+ }
+ return VirInterface{ptr: ptr}, nil
+}
+
+func (c *VirConnection) LookupInterfaceByMACString(mac string) (VirInterface, error) {
+ cName := C.CString(mac)
+ defer C.free(unsafe.Pointer(cName))
+ ptr := C.virInterfaceLookupByMACString(c.ptr, cName)
+ if ptr == nil {
+ return VirInterface{}, GetLastError()
+ }
+ return VirInterface{ptr: ptr}, nil
+}
+
+func (c *VirConnection) StoragePoolDefineXMLFromFile(xmlFile string) (VirStoragePool, error) {
+ xmlConfig, err := ioutil.ReadFile(xmlFile)
+ if err != nil {
+ return VirStoragePool{}, err
+ }
+ return c.StoragePoolDefineXML(string(xmlConfig), 0)
+}
+
+func (c *VirConnection) StoragePoolDefineXML(xmlConfig string, flags uint32) (VirStoragePool, error) {
+ cXml := C.CString(string(xmlConfig))
+ defer C.free(unsafe.Pointer(cXml))
+ ptr := C.virStoragePoolDefineXML(c.ptr, cXml, C.uint(flags))
+ if ptr == nil {
+ return VirStoragePool{}, GetLastError()
+ }
+ return VirStoragePool{ptr: ptr}, nil
+}
+
+func (c *VirConnection) LookupStoragePoolByName(name string) (VirStoragePool, error) {
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+ ptr := C.virStoragePoolLookupByName(c.ptr, cName)
+ if ptr == nil {
+ return VirStoragePool{}, GetLastError()
+ }
+ return VirStoragePool{ptr: ptr}, nil
+}
+
+func (c *VirConnection) LookupStoragePoolByUUIDString(uuid string) (VirStoragePool, error) {
+ cUuid := C.CString(uuid)
+ defer C.free(unsafe.Pointer(cUuid))
+ ptr := C.virStoragePoolLookupByUUIDString(c.ptr, cUuid)
+ if ptr == nil {
+ return VirStoragePool{}, GetLastError()
+ }
+ return VirStoragePool{ptr: ptr}, nil
+}
+
+func (c *VirConnection) NWFilterDefineXMLFromFile(xmlFile string) (VirNWFilter, error) {
+ xmlConfig, err := ioutil.ReadFile(xmlFile)
+ if err != nil {
+ return VirNWFilter{}, err
+ }
+ return c.NWFilterDefineXML(string(xmlConfig))
+}
+
+func (c *VirConnection) NWFilterDefineXML(xmlConfig string) (VirNWFilter, error) {
+ cXml := C.CString(string(xmlConfig))
+ defer C.free(unsafe.Pointer(cXml))
+ ptr := C.virNWFilterDefineXML(c.ptr, cXml)
+ if ptr == nil {
+ return VirNWFilter{}, GetLastError()
+ }
+ return VirNWFilter{ptr: ptr}, nil
+}
+
+func (c *VirConnection) LookupNWFilterByName(name string) (VirNWFilter, error) {
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+ ptr := C.virNWFilterLookupByName(c.ptr, cName)
+ if ptr == nil {
+ return VirNWFilter{}, GetLastError()
+ }
+ return VirNWFilter{ptr: ptr}, nil
+}
+
+func (c *VirConnection) LookupNWFilterByUUIDString(uuid string) (VirNWFilter, error) {
+ cUuid := C.CString(uuid)
+ defer C.free(unsafe.Pointer(cUuid))
+ ptr := C.virNWFilterLookupByUUIDString(c.ptr, cUuid)
+ if ptr == nil {
+ return VirNWFilter{}, GetLastError()
+ }
+ return VirNWFilter{ptr: ptr}, nil
+}
+
+func (c *VirConnection) LookupStorageVolByKey(key string) (VirStorageVol, error) {
+ cKey := C.CString(key)
+ defer C.free(unsafe.Pointer(cKey))
+ ptr := C.virStorageVolLookupByKey(c.ptr, cKey)
+ if ptr == nil {
+ return VirStorageVol{}, GetLastError()
+ }
+ return VirStorageVol{ptr: ptr}, nil
+}
+
+func (c *VirConnection) LookupStorageVolByPath(path string) (VirStorageVol, error) {
+ cPath := C.CString(path)
+ defer C.free(unsafe.Pointer(cPath))
+ ptr := C.virStorageVolLookupByPath(c.ptr, cPath)
+ if ptr == nil {
+ return VirStorageVol{}, GetLastError()
+ }
+ return VirStorageVol{ptr: ptr}, nil
+}
+
+func (c *VirConnection) SecretDefineXMLFromFile(xmlFile string) (VirSecret, error) {
+ xmlConfig, err := ioutil.ReadFile(xmlFile)
+ if err != nil {
+ return VirSecret{}, err
+ }
+ return c.SecretDefineXML(string(xmlConfig), 0)
+}
+
+func (c *VirConnection) SecretDefineXML(xmlConfig string, flags uint32) (VirSecret, error) {
+ cXml := C.CString(string(xmlConfig))
+ defer C.free(unsafe.Pointer(cXml))
+ ptr := C.virSecretDefineXML(c.ptr, cXml, C.uint(flags))
+ if ptr == nil {
+ return VirSecret{}, GetLastError()
+ }
+ return VirSecret{ptr: ptr}, nil
+}
+
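+// SecretSetValue sets the value of the secret identified by uuid. The
+// value argument is expected to be base64-encoded and is decoded before
+// being handed to virSecretSetValue.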
+func (c *VirConnection) SecretSetValue(uuid, value string) error {
+ cUuid := C.CString(uuid)
+ defer C.free(unsafe.Pointer(cUuid))
+ ptr := C.virSecretLookupByUUIDString(c.ptr, cUuid)
+ if ptr == nil {
+ return GetLastError()
+ }
+
+ secret, err := base64.StdEncoding.DecodeString(value)
+ if err != nil {
+ return err
+ }
+ cSecret := C.CString(string(secret))
+ defer C.free(unsafe.Pointer(cSecret))
+
+ res := C.virSecretSetValue(ptr, (*C.uchar)(unsafe.Pointer(cSecret)), C.size_t(len(secret)), 0)
+ if res != 0 {
+ return GetLastError()
+ }
+
+ return nil
+}
+
+func (c *VirConnection) LookupSecretByUUIDString(uuid string) (VirSecret, error) {
+ cUuid := C.CString(uuid)
+ defer C.free(unsafe.Pointer(cUuid))
+ ptr := C.virSecretLookupByUUIDString(c.ptr, cUuid)
+ if ptr == nil {
+ return VirSecret{}, GetLastError()
+ }
+ return VirSecret{ptr: ptr}, nil
+}
+
+func (c *VirConnection) LookupSecretByUsage(usageType int, usageID string) (VirSecret, error) {
+ cUsageID := C.CString(usageID)
+ defer C.free(unsafe.Pointer(cUsageID))
+ ptr := C.virSecretLookupByUsage(c.ptr, C.int(usageType), cUsageID)
+ if ptr == nil {
+ return VirSecret{}, GetLastError()
+ }
+ return VirSecret{ptr: ptr}, nil
+}
+
+func (c *VirConnection) ListAllInterfaces(flags uint32) ([]VirInterface, error) {
+ var cList *C.virInterfacePtr
+ numIfaces := C.virConnectListAllInterfaces(c.ptr, (**C.virInterfacePtr)(&cList), C.uint(flags))
+ if numIfaces == -1 {
+ return nil, GetLastError()
+ }
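+	// Build a Go slice over the C array so the handles can be collected
+	// without copying; the array itself is freed once they are wrapped.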
+ hdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(cList)),
+ Len: int(numIfaces),
+ Cap: int(numIfaces),
+ }
+ var ifaces []VirInterface
+ slice := *(*[]C.virInterfacePtr)(unsafe.Pointer(&hdr))
+ for _, ptr := range slice {
+ ifaces = append(ifaces, VirInterface{ptr})
+ }
+ C.free(unsafe.Pointer(cList))
+ return ifaces, nil
+}
+
+func (c *VirConnection) ListAllNetworks(flags uint32) ([]VirNetwork, error) {
+ var cList *C.virNetworkPtr
+ numNets := C.virConnectListAllNetworks(c.ptr, (**C.virNetworkPtr)(&cList), C.uint(flags))
+ if numNets == -1 {
+ return nil, GetLastError()
+ }
+ hdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(cList)),
+ Len: int(numNets),
+ Cap: int(numNets),
+ }
+ var nets []VirNetwork
+ slice := *(*[]C.virNetworkPtr)(unsafe.Pointer(&hdr))
+ for _, ptr := range slice {
+ nets = append(nets, VirNetwork{ptr})
+ }
+ C.free(unsafe.Pointer(cList))
+ return nets, nil
+}
+
+func (c *VirConnection) ListAllDomains(flags uint32) ([]VirDomain, error) {
+ var cList *C.virDomainPtr
+ numDomains := C.virConnectListAllDomains(c.ptr, (**C.virDomainPtr)(&cList), C.uint(flags))
+ if numDomains == -1 {
+ return nil, GetLastError()
+ }
+ hdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(cList)),
+ Len: int(numDomains),
+ Cap: int(numDomains),
+ }
+ var domains []VirDomain
+ slice := *(*[]C.virDomainPtr)(unsafe.Pointer(&hdr))
+ for _, ptr := range slice {
+ domains = append(domains, VirDomain{ptr})
+ }
+ C.free(unsafe.Pointer(cList))
+ return domains, nil
+}
+
+func (c *VirConnection) ListAllNWFilters(flags uint32) ([]VirNWFilter, error) {
+ var cList *C.virNWFilterPtr
+ numNWFilters := C.virConnectListAllNWFilters(c.ptr, (**C.virNWFilterPtr)(&cList), C.uint(flags))
+ if numNWFilters == -1 {
+ return nil, GetLastError()
+ }
+ hdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(cList)),
+ Len: int(numNWFilters),
+ Cap: int(numNWFilters),
+ }
+ var filters []VirNWFilter
+ slice := *(*[]C.virNWFilterPtr)(unsafe.Pointer(&hdr))
+ for _, ptr := range slice {
+ filters = append(filters, VirNWFilter{ptr})
+ }
+ C.free(unsafe.Pointer(cList))
+ return filters, nil
+}
+
+func (c *VirConnection) ListAllStoragePools(flags uint32) ([]VirStoragePool, error) {
+ var cList *C.virStoragePoolPtr
+ numPools := C.virConnectListAllStoragePools(c.ptr, (**C.virStoragePoolPtr)(&cList), C.uint(flags))
+ if numPools == -1 {
+ return nil, GetLastError()
+ }
+ hdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(cList)),
+ Len: int(numPools),
+ Cap: int(numPools),
+ }
+ var pools []VirStoragePool
+ slice := *(*[]C.virStoragePoolPtr)(unsafe.Pointer(&hdr))
+ for _, ptr := range slice {
+ pools = append(pools, VirStoragePool{ptr})
+ }
+ C.free(unsafe.Pointer(cList))
+ return pools, nil
+}
diff --git a/vendor/github.com/dmacvicar/libvirt-go/network.go b/vendor/github.com/dmacvicar/libvirt-go/network.go
new file mode 100644
index 00000000..b50d954b
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/network.go
@@ -0,0 +1,187 @@
+package libvirt
+
+/*
+#cgo LDFLAGS: -lvirt
+#include <libvirt/libvirt.h>
+#include <libvirt/virterror.h>
+#include <stdlib.h>
+*/
+import "C"
+
+import (
+ "unsafe"
+)
+
+const (
+ VIR_NETWORK_UPDATE_COMMAND_NONE = C.VIR_NETWORK_UPDATE_COMMAND_NONE
+ VIR_NETWORK_UPDATE_COMMAND_MODIFY = C.VIR_NETWORK_UPDATE_COMMAND_MODIFY
+ VIR_NETWORK_UPDATE_COMMAND_DELETE = C.VIR_NETWORK_UPDATE_COMMAND_DELETE
+ VIR_NETWORK_UPDATE_COMMAND_ADD_LAST = C.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST
+ VIR_NETWORK_UPDATE_COMMAND_ADD_FIRST = C.VIR_NETWORK_UPDATE_COMMAND_ADD_FIRST
+)
+
+const (
+ VIR_NETWORK_UPDATE_AFFECT_CURRENT = C.VIR_NETWORK_UPDATE_AFFECT_CURRENT
+ VIR_NETWORK_UPDATE_AFFECT_LIVE = C.VIR_NETWORK_UPDATE_AFFECT_LIVE
+ VIR_NETWORK_UPDATE_AFFECT_CONFIG = C.VIR_NETWORK_UPDATE_AFFECT_CONFIG
+)
+
+const (
+ VIR_NETWORK_SECTION_NONE = C.VIR_NETWORK_SECTION_NONE
+ VIR_NETWORK_SECTION_BRIDGE = C.VIR_NETWORK_SECTION_BRIDGE
+ VIR_NETWORK_SECTION_DOMAIN = C.VIR_NETWORK_SECTION_DOMAIN
+ VIR_NETWORK_SECTION_IP = C.VIR_NETWORK_SECTION_IP
+ VIR_NETWORK_SECTION_IP_DHCP_HOST = C.VIR_NETWORK_SECTION_IP_DHCP_HOST
+ VIR_NETWORK_SECTION_IP_DHCP_RANGE = C.VIR_NETWORK_SECTION_IP_DHCP_RANGE
+ VIR_NETWORK_SECTION_FORWARD = C.VIR_NETWORK_SECTION_FORWARD
+ VIR_NETWORK_SECTION_FORWARD_INTERFACE = C.VIR_NETWORK_SECTION_FORWARD_INTERFACE
+ VIR_NETWORK_SECTION_FORWARD_PF = C.VIR_NETWORK_SECTION_FORWARD_PF
+ VIR_NETWORK_SECTION_PORTGROUP = C.VIR_NETWORK_SECTION_PORTGROUP
+ VIR_NETWORK_SECTION_DNS_HOST = C.VIR_NETWORK_SECTION_DNS_HOST
+ VIR_NETWORK_SECTION_DNS_TXT = C.VIR_NETWORK_SECTION_DNS_TXT
+ VIR_NETWORK_SECTION_DNS_SRV = C.VIR_NETWORK_SECTION_DNS_SRV
+)
+
+type VirNetwork struct {
+ ptr C.virNetworkPtr
+}
+
+func (n *VirNetwork) Free() error {
+ if result := C.virNetworkFree(n.ptr); result != 0 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (n *VirNetwork) Create() error {
+ result := C.virNetworkCreate(n.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (n *VirNetwork) Destroy() error {
+ result := C.virNetworkDestroy(n.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (n *VirNetwork) IsActive() (bool, error) {
+ result := C.virNetworkIsActive(n.ptr)
+ if result == -1 {
+ return false, GetLastError()
+ }
+ if result == 1 {
+ return true, nil
+ }
+ return false, nil
+}
+
+func (n *VirNetwork) IsPersistent() (bool, error) {
+ result := C.virNetworkIsPersistent(n.ptr)
+ if result == -1 {
+ return false, GetLastError()
+ }
+ if result == 1 {
+ return true, nil
+ }
+ return false, nil
+}
+
+func (n *VirNetwork) GetAutostart() (bool, error) {
+ var out C.int
+ result := C.virNetworkGetAutostart(n.ptr, (*C.int)(unsafe.Pointer(&out)))
+ if result == -1 {
+ return false, GetLastError()
+ }
+ switch out {
+ case 1:
+ return true, nil
+ default:
+ return false, nil
+ }
+}
+
+func (n *VirNetwork) SetAutostart(autostart bool) error {
+ var cAutostart C.int
+ switch autostart {
+ case true:
+ cAutostart = 1
+ default:
+ cAutostart = 0
+ }
+ result := C.virNetworkSetAutostart(n.ptr, cAutostart)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (n *VirNetwork) GetName() (string, error) {
+ name := C.virNetworkGetName(n.ptr)
+ if name == nil {
+ return "", GetLastError()
+ }
+ return C.GoString(name), nil
+}
+
+func (n *VirNetwork) GetUUID() ([]byte, error) {
+ var cUuid [C.VIR_UUID_BUFLEN](byte)
+ cuidPtr := unsafe.Pointer(&cUuid)
+ result := C.virNetworkGetUUID(n.ptr, (*C.uchar)(cuidPtr))
+ if result != 0 {
+ return []byte{}, GetLastError()
+ }
+ return C.GoBytes(cuidPtr, C.VIR_UUID_BUFLEN), nil
+}
+
+func (n *VirNetwork) GetUUIDString() (string, error) {
+ var cUuid [C.VIR_UUID_STRING_BUFLEN](C.char)
+ cuidPtr := unsafe.Pointer(&cUuid)
+ result := C.virNetworkGetUUIDString(n.ptr, (*C.char)(cuidPtr))
+ if result != 0 {
+ return "", GetLastError()
+ }
+ return C.GoString((*C.char)(cuidPtr)), nil
+}
+
+func (n *VirNetwork) GetBridgeName() (string, error) {
+ result := C.virNetworkGetBridgeName(n.ptr)
+ if result == nil {
+ return "", GetLastError()
+ }
+ bridge := C.GoString(result)
+ C.free(unsafe.Pointer(result))
+ return bridge, nil
+}
+
+func (n *VirNetwork) GetXMLDesc(flags uint32) (string, error) {
+ result := C.virNetworkGetXMLDesc(n.ptr, C.uint(flags))
+ if result == nil {
+ return "", GetLastError()
+ }
+ xml := C.GoString(result)
+ C.free(unsafe.Pointer(result))
+ return xml, nil
+}
+
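+// UpdateXMLDesc applies xmldesc to the network section selected by section,
+// using the given update command. The parent index is fixed to -1 ("don't
+// care") and only the current state of the network is affected
+// (VIR_NETWORK_UPDATE_AFFECT_CURRENT).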
+func (n *VirNetwork) UpdateXMLDesc(xmldesc string, command, section int) error {
+ xmldescC := C.CString(xmldesc)
+ result := C.virNetworkUpdate(n.ptr, C.uint(command), C.uint(section), C.int(-1), xmldescC, C.uint(C.VIR_NETWORK_UPDATE_AFFECT_CURRENT))
+ C.free(unsafe.Pointer(xmldescC))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (n *VirNetwork) Undefine() error {
+ result := C.virNetworkUndefine(n.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
diff --git a/vendor/github.com/dmacvicar/libvirt-go/nodeinfo.go b/vendor/github.com/dmacvicar/libvirt-go/nodeinfo.go
new file mode 100644
index 00000000..e3fd7e68
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/nodeinfo.go
@@ -0,0 +1,50 @@
+package libvirt
+
+/*
+#cgo LDFLAGS: -lvirt
+#include <libvirt/libvirt.h>
+#include <libvirt/virterror.h>
+#include <stdlib.h>
+*/
+import "C"
+
+import (
+ "unsafe"
+)
+
+type VirNodeInfo struct {
+ ptr C.virNodeInfo
+}
+
+func (ni *VirNodeInfo) GetModel() string {
+ model := C.GoString((*C.char)(unsafe.Pointer(&ni.ptr.model)))
+ return model
+}
+
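+// GetMemoryKB returns the total memory of the node in kibibytes, as reported
+// by the virNodeInfo memory field.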
+func (ni *VirNodeInfo) GetMemoryKB() uint64 {
+ return uint64(ni.ptr.memory)
+}
+
+func (ni *VirNodeInfo) GetCPUs() uint32 {
+ return uint32(ni.ptr.cpus)
+}
+
+func (ni *VirNodeInfo) GetMhz() uint32 {
+ return uint32(ni.ptr.mhz)
+}
+
+func (ni *VirNodeInfo) GetNodes() uint32 {
+ return uint32(ni.ptr.nodes)
+}
+
+func (ni *VirNodeInfo) GetSockets() uint32 {
+ return uint32(ni.ptr.sockets)
+}
+
+func (ni *VirNodeInfo) GetCores() uint32 {
+ return uint32(ni.ptr.cores)
+}
+
+func (ni *VirNodeInfo) GetThreads() uint32 {
+ return uint32(ni.ptr.threads)
+}
diff --git a/vendor/github.com/dmacvicar/libvirt-go/nwfilter.go b/vendor/github.com/dmacvicar/libvirt-go/nwfilter.go
new file mode 100644
index 00000000..d196f02f
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/nwfilter.go
@@ -0,0 +1,70 @@
+package libvirt
+
+/*
+#cgo LDFLAGS: -lvirt
+#include <libvirt/libvirt.h>
+#include <libvirt/virterror.h>
+#include <stdlib.h>
+*/
+import "C"
+
+import (
+ "unsafe"
+)
+
+type VirNWFilter struct {
+ ptr C.virNWFilterPtr
+}
+
+func (f *VirNWFilter) Free() error {
+ if result := C.virNWFilterFree(f.ptr); result != 0 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (f *VirNWFilter) GetName() (string, error) {
+ name := C.virNWFilterGetName(f.ptr)
+ if name == nil {
+ return "", GetLastError()
+ }
+ return C.GoString(name), nil
+}
+
+func (f *VirNWFilter) Undefine() error {
+ result := C.virNWFilterUndefine(f.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (f *VirNWFilter) GetUUID() ([]byte, error) {
+	var cUuid [C.VIR_UUID_BUFLEN]byte
+ cuidPtr := unsafe.Pointer(&cUuid)
+ result := C.virNWFilterGetUUID(f.ptr, (*C.uchar)(cuidPtr))
+ if result != 0 {
+ return []byte{}, GetLastError()
+ }
+ return C.GoBytes(cuidPtr, C.VIR_UUID_BUFLEN), nil
+}
+
+func (f *VirNWFilter) GetUUIDString() (string, error) {
+	var cUuid [C.VIR_UUID_STRING_BUFLEN]C.char
+ cuidPtr := unsafe.Pointer(&cUuid)
+ result := C.virNWFilterGetUUIDString(f.ptr, (*C.char)(cuidPtr))
+ if result != 0 {
+ return "", GetLastError()
+ }
+ return C.GoString((*C.char)(cuidPtr)), nil
+}
+
+func (f *VirNWFilter) GetXMLDesc(flags uint32) (string, error) {
+ result := C.virNWFilterGetXMLDesc(f.ptr, C.uint(flags))
+ if result == nil {
+ return "", GetLastError()
+ }
+ xml := C.GoString(result)
+ C.free(unsafe.Pointer(result))
+ return xml, nil
+}
diff --git a/vendor/github.com/dmacvicar/libvirt-go/secret.go b/vendor/github.com/dmacvicar/libvirt-go/secret.go
new file mode 100644
index 00000000..f96360dd
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/secret.go
@@ -0,0 +1,78 @@
+package libvirt
+
+/*
+#cgo LDFLAGS: -lvirt
+#include <libvirt/libvirt.h>
+#include <libvirt/virterror.h>
+#include <stdlib.h>
+*/
+import "C"
+
+import (
+ "unsafe"
+)
+
+type VirSecret struct {
+ ptr C.virSecretPtr
+}
+
+func (s *VirSecret) Free() error {
+ if result := C.virSecretFree(s.ptr); result != 0 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (s *VirSecret) Undefine() error {
+ result := C.virSecretUndefine(s.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (s *VirSecret) GetUUID() ([]byte, error) {
+	var cUuid [C.VIR_UUID_BUFLEN]byte
+ cuidPtr := unsafe.Pointer(&cUuid)
+ result := C.virSecretGetUUID(s.ptr, (*C.uchar)(cuidPtr))
+ if result != 0 {
+ return []byte{}, GetLastError()
+ }
+ return C.GoBytes(cuidPtr, C.VIR_UUID_BUFLEN), nil
+}
+
+func (s *VirSecret) GetUUIDString() (string, error) {
+	var cUuid [C.VIR_UUID_STRING_BUFLEN]C.char
+ cuidPtr := unsafe.Pointer(&cUuid)
+ result := C.virSecretGetUUIDString(s.ptr, (*C.char)(cuidPtr))
+ if result != 0 {
+ return "", GetLastError()
+ }
+ return C.GoString((*C.char)(cuidPtr)), nil
+}
+
+func (s *VirSecret) GetUsageID() (string, error) {
+ result := C.virSecretGetUsageID(s.ptr)
+ if result == nil {
+ return "", GetLastError()
+ }
+ return C.GoString(result), nil
+}
+
+func (s *VirSecret) GetUsageType() (int, error) {
+ result := int(C.virSecretGetUsageType(s.ptr))
+ if result == -1 {
+ return 0, GetLastError()
+ }
+ return result, nil
+}
+
+func (s *VirSecret) GetXMLDesc(flags uint32) (string, error) {
+ result := C.virSecretGetXMLDesc(s.ptr, C.uint(flags))
+ if result == nil {
+ return "", GetLastError()
+ }
+ xml := C.GoString(result)
+ C.free(unsafe.Pointer(result))
+ return xml, nil
+}
diff --git a/vendor/github.com/dmacvicar/libvirt-go/snapshot.go b/vendor/github.com/dmacvicar/libvirt-go/snapshot.go
new file mode 100644
index 00000000..fb91fc51
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/snapshot.go
@@ -0,0 +1,95 @@
+package libvirt
+
+/*
+#cgo LDFLAGS: -lvirt
+#include <libvirt/libvirt.h>
+#include <libvirt/virterror.h>
+#include <stdlib.h>
+*/
+import "C"
+
+import (
+ "unsafe"
+)
+
+type VirDomainSnapshot struct {
+ ptr C.virDomainSnapshotPtr
+}
+
+func (s *VirDomainSnapshot) Free() error {
+ if result := C.virDomainSnapshotFree(s.ptr); result != 0 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (s *VirDomainSnapshot) Delete(flags uint32) error {
+ result := C.virDomainSnapshotDelete(s.ptr, C.uint(flags))
+ if result != 0 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (s *VirDomainSnapshot) RevertToSnapshot(flags uint32) error {
+ result := C.virDomainRevertToSnapshot(s.ptr, C.uint(flags))
+ if result != 0 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) CreateSnapshotXML(xml string, flags uint32) (VirDomainSnapshot, error) {
+ cXml := C.CString(xml)
+ defer C.free(unsafe.Pointer(cXml))
+ result := C.virDomainSnapshotCreateXML(d.ptr, cXml, C.uint(flags))
+ if result == nil {
+ return VirDomainSnapshot{}, GetLastError()
+ }
+ return VirDomainSnapshot{ptr: result}, nil
+}
+
+func (d *VirDomain) Save(destFile string) error {
+ cPath := C.CString(destFile)
+ defer C.free(unsafe.Pointer(cPath))
+ result := C.virDomainSave(d.ptr, cPath)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (d *VirDomain) SaveFlags(destFile string, destXml string, flags uint32) error {
+ cDestFile := C.CString(destFile)
+ cDestXml := C.CString(destXml)
+ defer C.free(unsafe.Pointer(cDestXml))
+ defer C.free(unsafe.Pointer(cDestFile))
+ result := C.virDomainSaveFlags(d.ptr, cDestFile, cDestXml, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (conn VirConnection) Restore(srcFile string) error {
+ cPath := C.CString(srcFile)
+ defer C.free(unsafe.Pointer(cPath))
+ if result := C.virDomainRestore(conn.ptr, cPath); result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
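+// RestoreFlags restores a domain from the saved state in srcFile. An empty
+// xmlConf passes NULL to libvirt, so the XML embedded in the save image is
+// used unchanged.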
+func (conn VirConnection) RestoreFlags(srcFile, xmlConf string, flags uint32) error {
+ cPath := C.CString(srcFile)
+ defer C.free(unsafe.Pointer(cPath))
+ var cXmlConf *C.char
+ if xmlConf != "" {
+ cXmlConf = C.CString(xmlConf)
+ defer C.free(unsafe.Pointer(cXmlConf))
+ }
+ if result := C.virDomainRestoreFlags(conn.ptr, cPath, cXmlConf, C.uint(flags)); result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
diff --git a/vendor/github.com/dmacvicar/libvirt-go/storage_pool.go b/vendor/github.com/dmacvicar/libvirt-go/storage_pool.go
new file mode 100644
index 00000000..fc49e4d2
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/storage_pool.go
@@ -0,0 +1,210 @@
+package libvirt
+
+/*
+#cgo LDFLAGS: -lvirt
+#include <libvirt/libvirt.h>
+#include <libvirt/virterror.h>
+#include <stdlib.h>
+*/
+import "C"
+
+import (
+ "io/ioutil"
+ "unsafe"
+)
+
+type VirStoragePool struct {
+ ptr C.virStoragePoolPtr
+}
+
+type VirStoragePoolInfo struct {
+ ptr C.virStoragePoolInfo
+}
+
+func (p *VirStoragePool) Build(flags uint32) error {
+ result := C.virStoragePoolBuild(p.ptr, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (p *VirStoragePool) Create(flags uint32) error {
+ result := C.virStoragePoolCreate(p.ptr, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (p *VirStoragePool) Delete(flags uint32) error {
+ result := C.virStoragePoolDelete(p.ptr, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (p *VirStoragePool) Destroy() error {
+ result := C.virStoragePoolDestroy(p.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (p *VirStoragePool) Free() error {
+ if result := C.virStoragePoolFree(p.ptr); result != 0 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (p *VirStoragePool) GetAutostart() (bool, error) {
+ var out C.int
+ result := C.virStoragePoolGetAutostart(p.ptr, (*C.int)(unsafe.Pointer(&out)))
+ if result == -1 {
+ return false, GetLastError()
+ }
+ switch out {
+ case 1:
+ return true, nil
+ default:
+ return false, nil
+ }
+}
+
+func (p *VirStoragePool) GetInfo() (VirStoragePoolInfo, error) {
+ pi := VirStoragePoolInfo{}
+ var ptr C.virStoragePoolInfo
+ result := C.virStoragePoolGetInfo(p.ptr, (*C.virStoragePoolInfo)(unsafe.Pointer(&ptr)))
+ if result == -1 {
+ return pi, GetLastError()
+ }
+ pi.ptr = ptr
+ return pi, nil
+}
+
+func (p *VirStoragePool) GetName() (string, error) {
+ name := C.virStoragePoolGetName(p.ptr)
+ if name == nil {
+ return "", GetLastError()
+ }
+ return C.GoString(name), nil
+}
+
+func (p *VirStoragePool) GetUUID() ([]byte, error) {
+	var cUuid [C.VIR_UUID_BUFLEN]byte
+ cuidPtr := unsafe.Pointer(&cUuid)
+ result := C.virStoragePoolGetUUID(p.ptr, (*C.uchar)(cuidPtr))
+ if result != 0 {
+ return []byte{}, GetLastError()
+ }
+ return C.GoBytes(cuidPtr, C.VIR_UUID_BUFLEN), nil
+}
+
+func (p *VirStoragePool) GetUUIDString() (string, error) {
+	var cUuid [C.VIR_UUID_STRING_BUFLEN]C.char
+ cuidPtr := unsafe.Pointer(&cUuid)
+ result := C.virStoragePoolGetUUIDString(p.ptr, (*C.char)(cuidPtr))
+ if result != 0 {
+ return "", GetLastError()
+ }
+ return C.GoString((*C.char)(cuidPtr)), nil
+}
+
+func (p *VirStoragePool) GetXMLDesc(flags uint32) (string, error) {
+ result := C.virStoragePoolGetXMLDesc(p.ptr, C.uint(flags))
+ if result == nil {
+ return "", GetLastError()
+ }
+ xml := C.GoString(result)
+ C.free(unsafe.Pointer(result))
+ return xml, nil
+}
+
+func (p *VirStoragePool) IsActive() (bool, error) {
+ result := C.virStoragePoolIsActive(p.ptr)
+ if result == -1 {
+ return false, GetLastError()
+ }
+ if result == 1 {
+ return true, nil
+ }
+ return false, nil
+}
+
+func (p *VirStoragePool) SetAutostart(autostart bool) error {
+ var cAutostart C.int
+ switch autostart {
+ case true:
+ cAutostart = 1
+ default:
+ cAutostart = 0
+ }
+ result := C.virStoragePoolSetAutostart(p.ptr, cAutostart)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (p *VirStoragePool) Refresh(flags uint32) error {
+ result := C.virStoragePoolRefresh(p.ptr, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (p *VirStoragePool) Undefine() error {
+ result := C.virStoragePoolUndefine(p.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (i *VirStoragePoolInfo) GetState() uint8 {
+ return uint8(i.ptr.state)
+}
+
+func (i *VirStoragePoolInfo) GetCapacityInBytes() uint64 {
+ return uint64(i.ptr.capacity)
+}
+
+func (i *VirStoragePoolInfo) GetAllocationInBytes() uint64 {
+ return uint64(i.ptr.allocation)
+}
+
+func (i *VirStoragePoolInfo) GetAvailableInBytes() uint64 {
+ return uint64(i.ptr.available)
+}
+
+func (p *VirStoragePool) StorageVolCreateXMLFromFile(xmlFile string, flags uint32) (VirStorageVol, error) {
+ xmlConfig, err := ioutil.ReadFile(xmlFile)
+ if err != nil {
+ return VirStorageVol{}, err
+ }
+ return p.StorageVolCreateXML(string(xmlConfig), flags)
+}
+
+func (p *VirStoragePool) StorageVolCreateXML(xmlConfig string, flags uint32) (VirStorageVol, error) {
+ cXml := C.CString(string(xmlConfig))
+ defer C.free(unsafe.Pointer(cXml))
+ ptr := C.virStorageVolCreateXML(p.ptr, cXml, C.uint(flags))
+ if ptr == nil {
+ return VirStorageVol{}, GetLastError()
+ }
+ return VirStorageVol{ptr: ptr}, nil
+}
+
+func (p *VirStoragePool) LookupStorageVolByName(name string) (VirStorageVol, error) {
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+ ptr := C.virStorageVolLookupByName(p.ptr, cName)
+ if ptr == nil {
+ return VirStorageVol{}, GetLastError()
+ }
+ return VirStorageVol{ptr: ptr}, nil
+}
diff --git a/vendor/github.com/dmacvicar/libvirt-go/storage_volume.go b/vendor/github.com/dmacvicar/libvirt-go/storage_volume.go
new file mode 100644
index 00000000..c0417675
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/storage_volume.go
@@ -0,0 +1,144 @@
+package libvirt
+
+/*
+#cgo LDFLAGS: -lvirt
+#include <libvirt/libvirt.h>
+#include <libvirt/virterror.h>
+#include <stdlib.h>
+*/
+import "C"
+
+import (
+ "unsafe"
+)
+
+type VirStorageVol struct {
+ ptr C.virStorageVolPtr
+}
+
+type VirStorageVolInfo struct {
+ ptr C.virStorageVolInfo
+}
+
+func (v *VirStorageVol) Delete(flags uint32) error {
+ result := C.virStorageVolDelete(v.ptr, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (v *VirStorageVol) Free() error {
+ if result := C.virStorageVolFree(v.ptr); result != 0 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (v *VirStorageVol) GetInfo() (VirStorageVolInfo, error) {
+ vi := VirStorageVolInfo{}
+ var ptr C.virStorageVolInfo
+ result := C.virStorageVolGetInfo(v.ptr, (*C.virStorageVolInfo)(unsafe.Pointer(&ptr)))
+ if result == -1 {
+ return vi, GetLastError()
+ }
+ vi.ptr = ptr
+ return vi, nil
+}
+
+func (i *VirStorageVolInfo) GetType() int {
+ return int(i.ptr._type)
+}
+
+func (i *VirStorageVolInfo) GetCapacityInBytes() uint64 {
+ return uint64(i.ptr.capacity)
+}
+
+func (i *VirStorageVolInfo) GetAllocationInBytes() uint64 {
+ return uint64(i.ptr.allocation)
+}
+
+func (v *VirStorageVol) GetKey() (string, error) {
+ key := C.virStorageVolGetKey(v.ptr)
+ if key == nil {
+ return "", GetLastError()
+ }
+ return C.GoString(key), nil
+}
+
+func (v *VirStorageVol) GetName() (string, error) {
+ name := C.virStorageVolGetName(v.ptr)
+ if name == nil {
+ return "", GetLastError()
+ }
+ return C.GoString(name), nil
+}
+
+func (v *VirStorageVol) GetPath() (string, error) {
+ result := C.virStorageVolGetPath(v.ptr)
+ if result == nil {
+ return "", GetLastError()
+ }
+ path := C.GoString(result)
+ C.free(unsafe.Pointer(result))
+ return path, nil
+}
+
+func (v *VirStorageVol) GetXMLDesc(flags uint32) (string, error) {
+ result := C.virStorageVolGetXMLDesc(v.ptr, C.uint(flags))
+ if result == nil {
+ return "", GetLastError()
+ }
+ xml := C.GoString(result)
+ C.free(unsafe.Pointer(result))
+ return xml, nil
+}
+
+func (v *VirStorageVol) Resize(capacity uint64, flags uint32) error {
+ result := C.virStorageVolResize(v.ptr, C.ulonglong(capacity), C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (v *VirStorageVol) Wipe(flags uint32) error {
+ result := C.virStorageVolWipe(v.ptr, C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (v *VirStorageVol) WipePattern(algorithm uint32, flags uint32) error {
+ result := C.virStorageVolWipePattern(v.ptr, C.uint(algorithm), C.uint(flags))
+ if result == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
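+// Upload sets up the given stream to write data into the volume; after the
+// call, the caller feeds the stream (e.g. via VirStream.Write in stream.go).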
+func (v *VirStorageVol) Upload(stream *VirStream, offset, length uint64, flags uint32) error {
+ if C.virStorageVolUpload(v.ptr, stream.ptr, C.ulonglong(offset),
+ C.ulonglong(length), C.uint(flags)) == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (v *VirStorageVol) Download(stream *VirStream, offset, length uint64, flags uint32) error {
+ if C.virStorageVolDownload(v.ptr, stream.ptr, C.ulonglong(offset),
+ C.ulonglong(length), C.uint(flags)) == -1 {
+ return GetLastError()
+ }
+ return nil
+}
+
+func (v *VirStorageVol) LookupPoolByVolume() (VirStoragePool, error) {
+ poolPtr := C.virStoragePoolLookupByVolume(v.ptr)
+ if poolPtr == nil {
+ return VirStoragePool{}, GetLastError()
+ }
+ return VirStoragePool{ptr: poolPtr}, nil
+}
diff --git a/vendor/github.com/dmacvicar/libvirt-go/stream.go b/vendor/github.com/dmacvicar/libvirt-go/stream.go
new file mode 100644
index 00000000..21b8af10
--- /dev/null
+++ b/vendor/github.com/dmacvicar/libvirt-go/stream.go
@@ -0,0 +1,79 @@
+package libvirt
+
+/*
+#cgo LDFLAGS: -lvirt
+#include <libvirt/libvirt.h>
+#include <libvirt/virterror.h>
+#include <stdlib.h>
+*/
+import "C"
+import (
+ "io"
+ "unsafe"
+)
+
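+// VirStream wraps a libvirt stream. Read and Write below make it satisfy
+// io.Reader and io.Writer, so it can be used with helpers such as io.Copy,
+// for example together with VirStorageVol.Upload and Download.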
+type VirStream struct {
+ ptr C.virStreamPtr
+}
+
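+// NewVirStream allocates a new stream on the given connection. Callers are
+// responsible for releasing it with Free.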
+func NewVirStream(c *VirConnection, flags uint) (*VirStream, error) {
+ virStream := C.virStreamNew(c.ptr, C.uint(flags))
+ if virStream == nil {
+ return nil, GetLastError()
+ }
+
+ return &VirStream{
+ ptr: virStream,
+ }, nil
+}
+
+func (v *VirStream) Abort() error {
+ result := C.virStreamAbort(v.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+
+ return nil
+}
+
+func (v *VirStream) Close() error {
+ result := C.virStreamFinish(v.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+
+ return nil
+}
+
+func (v *VirStream) Free() error {
+ result := C.virStreamFree(v.ptr)
+ if result == -1 {
+ return GetLastError()
+ }
+
+ return nil
+}
+
+func (v *VirStream) Read(p []byte) (int, error) {
+	// Guard against empty buffers: &p[0] would panic on a zero-length slice.
+	if len(p) == 0 {
+		return 0, nil
+	}
+	n := C.virStreamRecv(v.ptr, (*C.char)(unsafe.Pointer(&p[0])), C.size_t(len(p)))
+ if n < 0 {
+ return 0, GetLastError()
+ }
+ if n == 0 {
+ return 0, io.EOF
+ }
+
+ return int(n), nil
+}
+
+func (v *VirStream) Write(p []byte) (int, error) {
+	// Guard against empty buffers: &p[0] would panic on a zero-length slice.
+	if len(p) == 0 {
+		return 0, nil
+	}
+	n := C.virStreamSend(v.ptr, (*C.char)(unsafe.Pointer(&p[0])), C.size_t(len(p)))
+ if n < 0 {
+ return 0, GetLastError()
+ }
+ if n == 0 {
+ return 0, io.EOF
+ }
+
+ return int(n), nil
+}
diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE
new file mode 100644
index 00000000..37ec93a1
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md
new file mode 100644
index 00000000..85947422
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/README.md
@@ -0,0 +1,740 @@
+INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini/-/badge.svg)](https://sourcegraph.com/github.com/go-ini/ini?badge)
+===
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+[简体中文](README_ZH.md)
+
+## Features
+
+- Load multiple data sources (`[]byte`, file, and `io.ReadCloser`) with overwrites.
+- Read with recursive values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+To use a tagged revision:
+
+ go get gopkg.in/ini.v1
+
+To use the latest changes:
+
+	go get github.com/go-ini/ini
+
+Add the `-u` flag to update the package in the future.
+
+### Testing
+
+If you want to run the tests on your own machine, add the `-t` flag:
+
+ go get -t gopkg.in/ini.v1
+
+Add the `-u` flag to update in the future.
+
+## Getting Started
+
+### Loading from data sources
+
+A **Data Source** is either raw data of type `[]byte`, a file name of type `string`, or an `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
+```
+
+Or start with an empty object:
+
+```go
+cfg := ini.Empty()
+```
+
+If you cannot decide how many data sources to load at the beginning, you can still **Append()** them later.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+If you have a list of files, some of which may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+The cool thing is, whenever a previously missing file becomes available and you call the `Reload` method, it will be loaded as usual.
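+
+A minimal sketch (the file names here are made up; `override.ini` does not exist at first load):
+
+```go
+cfg, err := ini.LooseLoad("base.ini", "override.ini")
+// ...
+// Once override.ini shows up on disk, a reload picks it up:
+err = cfg.Reload()
+```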
+
+#### Ignore cases of key name
+
+When you do not care about the casing of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing.
+
+```go
+cfg, err := ini.InsensitiveLoad("filename")
+//...
+
+// sec1 and sec2 point to the exact same section object
+sec1, err := cfg.GetSection("Section")
+sec2, err := cfg.GetSection("SecTIOn")
+
+// key1 and key2 point to the exact same key object
+key1, err := cfg.GetKey("Key")
+key2, err := cfg.GetKey("KeY")
+```
+
+#### MySQL-like boolean key
+
+MySQL's configuration allows a key without a value, as follows:
+
+```ini
+[mysqld]
+...
+skip-host-cache
+skip-name-resolve
+```
+
+By default, this is considered a missing value. But if you know you're going to deal with such cases, you can assign advanced load options:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
+```
+
+The value of those keys is always `true`, and when you save to a file, they keep the same format as they were read in.
+
+To generate such keys in your program, you could use `NewBooleanKey`:
+
+```go
+key, err := sec.NewBooleanKey("skip-host-cache")
+```
+
+#### Comment
+
+Take care that the following formats will be treated as comments:
+
+1. Lines beginning with `#` or `;`
+2. Words after `#` or `;`
+3. Words after a section name (i.e. words after `[some section name]`)
+
+If you want to save a value containing `#` or `;`, quote it with ``` ` ``` or ``` """ ```.
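+
+A small sketch of how these rules play out (key names are made up):
+
+```ini
+; a full-line comment
+# another full-line comment
+key = value # everything from the # on is a comment
+quoted = `stay; together # please` ; the # and ; inside backticks are kept
+```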
+
+### Working with sections
+
+To get a section, you would need to:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+As a shortcut for the default section, just give an empty string as the name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you're pretty sure the section exists, the following code could make your life easier:
+
+```go
+section := cfg.Section("section name")
+```
+
+What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
+
+To create a new section:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+To get a list of sections or section names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+Same rule applies to key operations:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To check if a key exists:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+To create a new key:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+To get a list of keys or key names:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a cloned hash of keys and their corresponding values:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
+
+### Working with values
+
+To get a string value:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To validate a key's value on the fly:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+ if len(in) == 0 {
+ return "default"
+ }
+ return in
+})
+```
+
+If you do not want any auto-transformation (such as recursive read) of the values, you can get the raw value directly (this way you get much better performance):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+To check if raw value exists:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+To get value with types:
+
+```go
+// For boolean values:
+// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// Methods starting with Must also accept one argument as the default value,
+// used when the key is not found or the value fails to parse to the given type.
+// The exception is MustString, which always requires a default value.
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+What if my value is three lines long?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+Not a problem!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------ end --- */
+```
+
+That's cool, how about continuation lines?
+
+```ini
+[advance]
+two_lines = how about \
+ continuation lines?
+lots_of_lines = 1 \
+ 2 \
+ 3 \
+ 4
+```
+
+Piece of cake!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+Well, I hate continuation lines, how do I disable that?
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{
+ IgnoreContinuation: true,
+}, "filename")
+```
+
+Holy crap!
+
+Note that single quotes around values will be stripped:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+That's all? Hmm, no.
+
+#### Helper methods of working with values
+
+To get value with given candidates:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+The default value will be returned if the value of the key is not among the candidates you give, and the default value does not need to be one of the candidates.
+
+To validate value in a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+##### Auto-split values into a slice
+
+To use zero value of type for invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+To exclude invalid values out of result slice:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [2.2]
+vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
+vals = cfg.Section("").Key("INTS").ValidInts(",")
+vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
+vals = cfg.Section("").Key("UINTS").ValidUints(",")
+vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
+vals = cfg.Section("").Key("TIMES").ValidTimes(",")
+```
+
+Or to return nothing but an error when there are invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> error
+vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
+vals = cfg.Section("").Key("INTS").StrictInts(",")
+vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
+vals = cfg.Section("").Key("UINTS").StrictUints(",")
+vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
+### Save your configuration
+
+Finally, it's time to save your configuration somewhere.
+
+A typical way to save configuration is writing it to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+Another way to save is to write to an `io.Writer`:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+By default, spaces are used to align the "=" sign between keys and values; to disable that:
+
+```go
+ini.PrettyFormat = false
+```
+
+## Advanced Usage
+
+### Recursive Values
+
+For all key values, there is a special syntax `%(<name>)s`, where `<name>` is a key name in the same section or the default section; `%(<name>)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax with at most 99 levels of recursion.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+### Parent-child Sections
+
+You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will try again in its parent section, and so on until there is no parent section.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+#### Retrieve parent keys available to a child section
+
+```go
+cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
+```
+
+### Unparseable Sections
+
+Sometimes you have sections that do not contain key-value pairs but raw content. To handle such cases, you can use `LoadOptions.UnparseableSections`:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`)
+
+body := cfg.Section("COMMENTS").Body()
+
+/* --- start ---
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>
+------ end --- */
+```
+
+### Auto-increment Key Names
+
+If a key name is `-` in the data source, it is treated as special syntax for auto-increment key names starting from 1, and every section has an independent counter.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
+
+### Map To Struct
+
+Want a more object-oriented way to play with INI? Cool.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+ Content string
+ Cities []string
+}
+
+type Person struct {
+ Name string
+ Age int `ini:"age"`
+ Male bool
+ Born time.Time
+ Note
+ Created time.Time `ini:"-"`
+}
+
+func main() {
+ cfg, err := ini.Load("path/to/ini")
+ // ...
+ p := new(Person)
+ err = cfg.MapTo(p)
+ // ...
+
+ // Things can be simpler.
+ err = ini.MapTo(p, "path/to/ini")
+ // ...
+
+ // Just map a section? Fine.
+ n := new(Note)
+ err = cfg.Section("Note").MapTo(n)
+ // ...
+}
+```
+
+Can I have a default value for a field? Absolutely.
+
+Assign it before you map to the struct. The value will be kept as-is if the key is not present or has the wrong type.
+
+```go
+// ...
+p := &Person{
+ Name: "Joe",
+}
+// ...
+```
+
+It's really cool, but what's the point if you can't get your file back from the struct?
+
+### Reflect From Struct
+
+Why not?
+
+```go
+type Embeded struct {
+ Dates []time.Time `delim:"|"`
+ Places []string `ini:"places,omitempty"`
+ None []int `ini:",omitempty"`
+}
+
+type Author struct {
+ Name string `ini:"NAME"`
+ Male bool
+ Age int
+ GPA float64
+ NeverMind string `ini:"-"`
+ *Embeded
+}
+
+func main() {
+ a := &Author{"Unknwon", true, 21, 2.8, "",
+ &Embeded{
+ []time.Time{time.Now(), time.Now()},
+ []string{"HangZhou", "Boston"},
+ []int{},
+ }}
+ cfg := ini.Empty()
+ err = ini.ReflectFrom(cfg, a)
+ // ...
+}
+```
+
+So, what do I get?
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+places = HangZhou,Boston
+```
+
+#### Name Mapper
+
+To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field names and actual section and key names.
+
+There are two built-in name mappers:
+
+- `AllCapsUnderscore`: converts names to the `ALL_CAPS_UNDERSCORE` format, then matches sections or keys.
+- `TitleUnderscore`: converts names to the `title_underscore` format, then matches sections or keys.
+
+To use them:
+
+```go
+type Info struct {
+ PackageName string
+}
+
+func main() {
+ err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+ // ...
+
+ cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+ // ...
+ info := new(Info)
+ cfg.NameMapper = ini.AllCapsUnderscore
+ err = cfg.MapTo(info)
+ // ...
+}
+```
+
+The same name mapper rules apply to the `ini.ReflectFromWithMapper` function.
+
+#### Value Mapper
+
+To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values:
+
+```go
+type Env struct {
+ Foo string `ini:"foo"`
+}
+
+func main() {
+	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
+ cfg.ValueMapper = os.ExpandEnv
+ // ...
+ env := &Env{}
+ err = cfg.Section("env").MapTo(env)
+}
+```
+
+This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`.
+
+#### Other Notes On Map/Reflect
+
+Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature:
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+What if, yes, I'm paranoid, and I want the embedded struct to be in the same section? Well, all roads lead to Rome.
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child `ini:"Parent"`
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File An Issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What does `BlockMode` field do?
+
+By default, the library lets you read and write values, so it needs a lock to keep your data safe. But if you are sure you will only ever read data through the library, you can set `cfg.BlockMode = false` to speed up read operations by about **50-70%**.
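+
+A minimal sketch (the file name is made up):
+
+```go
+cfg, err := ini.Load("app.ini")
+if err != nil {
+	panic(err)
+}
+// The configuration is read-only from here on, so skip locking.
+cfg.BlockMode = false
+```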
+
+### Why another INI library?
+
+Many people are using my other INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is that I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
+
+Making those changes meant breaking the API, so it was safer to keep this in another place and start using `gopkg.in` to version the package. (PS: shorter import path)
+
+## License
+
+This project is under the Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md
new file mode 100644
index 00000000..163432db
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/README_ZH.md
@@ -0,0 +1,727 @@
+本包提供了 Go 语言中读写 INI 文件的功能。
+
+## 功能特性
+
+- 支持覆盖加载多个数据源(`[]byte`、文件和 `io.ReadCloser`)
+- 支持递归读取键值
+- 支持读取父子分区
+- 支持读取自增键名
+- 支持读取多行的键值
+- 支持大量辅助方法
+- 支持在读取时直接转换为 Go 语言类型
+- 支持读取和 **写入** 分区和键的注释
+- 轻松操作分区、键值和注释
+- 在保存文件时分区和键值会保持原有的顺序
+
+## 下载安装
+
+使用一个特定版本:
+
+ go get gopkg.in/ini.v1
+
+使用最新版:
+
+ go get github.com/go-ini/ini
+
+如需更新请添加 `-u` 选项。
+
+### 测试安装
+
+如果您想要在自己的机器上运行测试,请使用 `-t` 标记:
+
+ go get -t gopkg.in/ini.v1
+
+如需更新请添加 `-u` 选项。
+
+## 开始使用
+
+### 从数据源加载
+
+一个 **数据源** 可以是 `[]byte` 类型的原始数据,`string` 类型的文件路径或 `io.ReadCloser`。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
+```
+
+或者从一个空白的文件开始:
+
+```go
+cfg := ini.Empty()
+```
+
+当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误):
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。
+
+#### 忽略键名的大小写
+
+有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写:
+
+```go
+cfg, err := ini.InsensitiveLoad("filename")
+//...
+
+// sec1 和 sec2 指向同一个分区对象
+sec1, err := cfg.GetSection("Section")
+sec2, err := cfg.GetSection("SecTIOn")
+
+// key1 和 key2 指向同一个键对象
+key1, err := cfg.GetKey("Key")
+key2, err := cfg.GetKey("KeY")
+```
+
+#### 类似 MySQL 配置中的布尔值键
+
+MySQL 的配置文件中会出现没有具体值的布尔类型的键:
+
+```ini
+[mysqld]
+...
+skip-host-cache
+skip-name-resolve
+```
+
+默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
+```
+
+这些键的值永远为 `true`,且在保存到文件时也只会输出键名。
+
+如果您想要通过程序来生成此类键,则可以使用 `NewBooleanKey`:
+
+```go
+key, err := sec.NewBooleanKey("skip-host-cache")
+```
+
+#### 关于注释
+
+下述几种情况的内容将被视为注释:
+
+1. 所有以 `#` 或 `;` 开头的行
+2. 所有在 `#` 或 `;` 之后的内容
+3. 分区标签后的文字 (即 `[分区名]` 之后的内容)
+
+如果你希望使用包含 `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。
+
+### 操作分区(Section)
+
+获取指定分区:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+如果您想要获取默认分区,则可以用空字符串代替分区名:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+当您非常确定某个分区是存在的,可以使用以下简便方法:
+
+```go
+section := cfg.Section("section name")
+```
+
+如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。
+
+创建一个分区:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+获取所有分区对象或名称:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### 操作键(Key)
+
+获取某个分区下的键:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+和分区一样,您也可以直接获取键而忽略错误处理:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+判断某个键是否存在:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+创建一个新的键:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+获取分区下的所有键或键名:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+获取分区下的所有键值对的克隆:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
+
+### 操作键值(Value)
+
+获取一个类型为字符串(string)的值:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+获取值的同时通过自定义函数进行处理验证:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+ if len(in) == 0 {
+ return "default"
+ }
+ return in
+})
+```
+
+如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+判断某个原值是否存在:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+获取其它类型的值:
+
+```go
+// 布尔值的规则:
+// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值,
+// 当键不存在或者转换失败时,则会直接返回该默认值。
+// 但是,MustString 方法必须传递一个默认值。
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+如果我的值有好多行怎么办?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+嗯哼?小 case!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------ end --- */
+```
+
+赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办?
+
+```ini
+[advance]
+two_lines = how about \
+ continuation lines?
+lots_of_lines = 1 \
+ 2 \
+ 3 \
+ 4
+```
+
+简直是小菜一碟!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢?
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{
+ IgnoreContinuation: true,
+}, "filename")
+```
+
+哇靠给力啊!
+
+需要注意的是,值两侧的单引号会被自动剔除:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+这就是全部了?哈哈,当然不是。
+
+#### 操作键值的辅助方法
+
+获取键值时设定候选值:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。
+
+验证获取的值是否在指定范围内:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+##### 自动分割键值到切片(slice)
+
+当存在无效输入时,使用零值代替:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+从结果切片中剔除无效输入:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [2.2]
+vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
+vals = cfg.Section("").Key("INTS").ValidInts(",")
+vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
+vals = cfg.Section("").Key("UINTS").ValidUints(",")
+vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
+vals = cfg.Section("").Key("TIMES").ValidTimes(",")
+```
+
+Return an error as soon as an invalid input is encountered:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> error
+vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
+vals = cfg.Section("").Key("INTS").StrictInts(",")
+vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
+vals = cfg.Section("").Key("UINTS").StrictUints(",")
+vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
+### Saving configurations
+
+The moment has finally come: time to save the configuration.
+
+The most basic approach is to write it out to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+A more advanced approach is to write to any object that implements the `io.Writer` interface:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+By default, spaces are used to align the "=" signs between keys and values for prettier output. The following disables that behavior:
+
+```go
+ini.PrettyFormat = false
+```
+
+## Advanced usage
+
+### Recursive value substitution
+
+When values are retrieved, the special syntax `%(<name>)s` is expanded, where `<name>` is a key name in the same section or in the default section. The string `%(<name>)s` is replaced by the corresponding value; if the referenced key does not exist, an empty string is used instead. Up to 99 levels of recursive nesting are allowed.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+### Parent-child sections
+
+Use `.` in section names to express a parent-child relationship between two or more sections. If a key is missing from a child section, the lookup continues in its parent sections until there are no more parents.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+#### Retrieving all key names of parent sections
+
+```go
+cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
+```
+
+### Unparseable sections
+
+Sections that contain free-form plain text instead of the usual key-value pairs can be handled with `LoadOptions.UnparseableSections`:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, []byte(`[COMMENTS]
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`))
+
+body := cfg.Section("COMMENTS").Body()
+
+/* --- start ---
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>
+------ end --- */
+```
+
+### Auto-increment key names
+
+If a key name in the data source is `-`, it is treated as using the special auto-increment syntax. The counter starts at 1, and each section keeps its own counter.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
+
+### Mapping to structs
+
+Want to work with INI in a more object-oriented way? Good idea.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+ Content string
+ Cities []string
+}
+
+type Person struct {
+ Name string
+ Age int `ini:"age"`
+ Male bool
+ Born time.Time
+ Note
+ Created time.Time `ini:"-"`
+}
+
+func main() {
+ cfg, err := ini.Load("path/to/ini")
+ // ...
+ p := new(Person)
+ err = cfg.MapTo(p)
+ // ...
+
+	// It can be as simple as this.
+ err = ini.MapTo(p, "path/to/ini")
+ // ...
+
+	// Huh? Only need to map a single section?
+ n := new(Note)
+ err = cfg.Section("Note").MapTo(n)
+ // ...
+}
+```
+
+How do you set default values for struct fields? Simple: assign the fields before mapping. If a key is not found or has the wrong type, the value is left unchanged.
+
+```go
+// ...
+p := &Person{
+ Name: "Joe",
+}
+// ...
+```
+
+Working with INI like this is really cool! But what good is it if I can't get my original configuration file back?
+
+### Reflecting from structs
+
+Did I ever say you can't?
+
+```go
+type Embeded struct {
+ Dates []time.Time `delim:"|"`
+ Places []string `ini:"places,omitempty"`
+ None []int `ini:",omitempty"`
+}
+
+type Author struct {
+ Name string `ini:"NAME"`
+ Male bool
+ Age int
+ GPA float64
+ NeverMind string `ini:"-"`
+ *Embeded
+}
+
+func main() {
+ a := &Author{"Unknwon", true, 21, 2.8, "",
+ &Embeded{
+ []time.Time{time.Now(), time.Now()},
+ []string{"HangZhou", "Boston"},
+ []int{},
+ }}
+ cfg := ini.Empty()
+ err = ini.ReflectFrom(cfg, a)
+ // ...
+}
+```
+
+Look at that, magic happens.
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+places = HangZhou,Boston
+```
+
+#### Name Mapper
+
+To save you time and simplify your code, this library supports name mappers of type [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper), which handle the mapping between struct field names and section or key names.
+
+There are currently two built-in mappers:
+
+- `AllCapsUnderscore`: converts a field name to the format `ALL_CAPS_UNDERSCORE` before matching section and key names.
+- `TitleUnderscore`: converts a field name to the format `title_underscore` before matching section and key names.
+
+Usage:
+
+```go
+type Info struct {
+ PackageName string
+}
+
+func main() {
+ err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+ // ...
+
+ cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+ // ...
+ info := new(Info)
+ cfg.NameMapper = ini.AllCapsUnderscore
+ err = cfg.MapTo(info)
+ // ...
+}
+```
+
+The same rules apply when using the function `ini.ReflectFromWithMapper`.
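+
+A minimal sketch, assuming `ReflectFromWithMapper(cfg, v, mapper)` mirrors the argument order of `MapToWithMapper`:
+
+```go
+cfg := ini.Empty()
+info := &Info{PackageName: "ini"}
+err = ini.ReflectFromWithMapper(cfg, info, ini.AllCapsUnderscore)
+// The PackageName field would be written out as the key PACKAGE_NAME.
+```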
+
+#### Value Mapper
+
+A value mapper automatically expands values through a custom function, for example to read environment variables at runtime:
+
+```go
+type Env struct {
+ Foo string `ini:"foo"`
+}
+
+func main() {
+	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
+ cfg.ValueMapper = os.ExpandEnv
+ // ...
+ env := &Env{}
+ err = cfg.Section("env").MapTo(env)
+}
+```
+
+In this example, `env.Foo` will hold the runtime value of the environment variable `MY_VAR`.
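+
+For instance, with a hypothetical value for `MY_VAR`:
+
+```go
+os.Setenv("MY_VAR", "bar")
+// ... load and map as above ...
+fmt.Println(env.Foo) // bar
+```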
+
+#### Other notes on mapping and reflecting
+
+Any embedded struct is treated as its own section by default, and no parent-child section relationship is created automatically:
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+Example configuration file:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+Great, but what if I want the embedded struct to stay in the same section? Fine, you're the boss!
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child `ini:"Parent"`
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+Example configuration file:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting help
+
+- [API documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File an issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQ
+
+### What is the `BlockMode` field?
+
+By default, this library uses locking to keep data safe during read and write operations. In some situations, though, you know for certain that you will only be reading. Setting `cfg.BlockMode = false` then speeds up read operations by roughly **50-70%**.
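+
+A plain sketch of the switch, with a hypothetical `app.ini`:
+
+```go
+cfg, err := ini.Load("app.ini")
+// ...
+cfg.BlockMode = false // read-only from here on: skip locking for speed
+```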
+
+### Why write another INI parsing library?
+
+Many people use my [goconfig](https://github.com/Unknwon/goconfig) to work with INI files, but I wanted code in a more Go-idiomatic style. On top of that, setting `cfg.BlockMode = false` gives a performance boost of about **10-30%**.
+
+To make these changes I had to break the API, so starting a fresh repository was the safest choice. Besides, this library is versioned directly through `gopkg.in`. (The real reason is that the import path is shorter.)
diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go
new file mode 100644
index 00000000..80afe743
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/error.go
@@ -0,0 +1,32 @@
+// Copyright 2016 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "fmt"
+)
+
+type ErrDelimiterNotFound struct {
+ Line string
+}
+
+func IsErrDelimiterNotFound(err error) bool {
+ _, ok := err.(ErrDelimiterNotFound)
+ return ok
+}
+
+func (err ErrDelimiterNotFound) Error() string {
+ return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
+}
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
new file mode 100644
index 00000000..5211d5ab
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/ini.go
@@ -0,0 +1,556 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ // Name for default section. You can use this constant or the string literal.
+	// In most cases, an empty string is all you need to access the section.
+ DEFAULT_SECTION = "DEFAULT"
+
+	// Maximum allowed depth when recursively substituting variable names.
+ _DEPTH_VALUES = 99
+ _VERSION = "1.27.0"
+)
+
+// Version returns current package version literal.
+func Version() string {
+ return _VERSION
+}
+
+var (
+ // Delimiter to determine or compose a new line.
+ // This variable will be changed to "\r\n" automatically on Windows
+ // at package init time.
+ LineBreak = "\n"
+
+ // Variable regexp pattern: %(variable)s
+ varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
+
+ // Indicate whether to align "=" sign with spaces to produce pretty output
+ // or reduce all possible spaces for compact format.
+ PrettyFormat = true
+
+ // Explicitly write DEFAULT section header
+ DefaultHeader = false
+)
+
+func init() {
+ if runtime.GOOS == "windows" {
+ LineBreak = "\r\n"
+ }
+}
+
+func inSlice(str string, s []string) bool {
+ for _, v := range s {
+ if str == v {
+ return true
+ }
+ }
+ return false
+}
+
+// dataSource is an interface that returns an object which can be read and closed.
+type dataSource interface {
+ ReadCloser() (io.ReadCloser, error)
+}
+
+// sourceFile represents an object that contains content on the local file system.
+type sourceFile struct {
+ name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+ return os.Open(s.name)
+}
+
+type bytesReadCloser struct {
+ reader io.Reader
+}
+
+func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
+ return rc.reader.Read(p)
+}
+
+func (rc *bytesReadCloser) Close() error {
+ return nil
+}
+
+// sourceData represents an object that contains content in memory.
+type sourceData struct {
+ data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+ return ioutil.NopCloser(bytes.NewReader(s.data)), nil
+}
+
+// sourceReadCloser represents an input stream with Close method.
+type sourceReadCloser struct {
+ reader io.ReadCloser
+}
+
+func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
+ return s.reader, nil
+}
+
+// File represents a combination of one or more INI files in memory.
+type File struct {
+	// BlockMode indicates whether read/write operations are guarded by a lock.
+	// It should make things safe, but sometimes that doesn't matter.
+ BlockMode bool
+ // Make sure data is safe in multiple goroutines.
+ lock sync.RWMutex
+
+ // Allow combination of multiple data sources.
+ dataSources []dataSource
+ // Actual data is stored here.
+ sections map[string]*Section
+
+ // To keep data in order.
+ sectionList []string
+
+ options LoadOptions
+
+ NameMapper
+ ValueMapper
+}
+
+// newFile initializes File object with given data sources.
+func newFile(dataSources []dataSource, opts LoadOptions) *File {
+ return &File{
+ BlockMode: true,
+ dataSources: dataSources,
+ sections: make(map[string]*Section),
+ sectionList: make([]string, 0, 10),
+ options: opts,
+ }
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+ switch s := source.(type) {
+ case string:
+ return sourceFile{s}, nil
+ case []byte:
+ return &sourceData{s}, nil
+ case io.ReadCloser:
+ return &sourceReadCloser{s}, nil
+ default:
+		return nil, fmt.Errorf("error parsing data source: unknown type '%T'", s)
+ }
+}
+
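+// LoadOptions contains options that change how data sources are parsed.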
+type LoadOptions struct {
+ // Loose indicates whether the parser should ignore nonexistent files or return error.
+ Loose bool
+ // Insensitive indicates whether the parser forces all section and key names to lowercase.
+ Insensitive bool
+ // IgnoreContinuation indicates whether to ignore continuation lines while parsing.
+ IgnoreContinuation bool
+ // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
+ IgnoreInlineComment bool
+	// AllowBooleanKeys indicates whether to allow boolean-type keys or to treat such lines as keys with missing values.
+	// This type of key is mostly used in my.cnf.
+ AllowBooleanKeys bool
+ // AllowShadows indicates whether to keep track of keys with same name under same section.
+ AllowShadows bool
+ // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
+ // conform to key/value pairs. Specify the names of those blocks here.
+ UnparseableSections []string
+}
+
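+// LoadSources allows the caller to apply customized options for loading from the data source(s).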
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
+ sources := make([]dataSource, len(others)+1)
+ sources[0], err = parseDataSource(source)
+ if err != nil {
+ return nil, err
+ }
+ for i := range others {
+ sources[i+1], err = parseDataSource(others[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ f := newFile(sources, opts)
+ if err = f.Reload(); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be mixed of file name with string type, or raw data in []byte.
+// It will return error if list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{}, source, others...)
+}
+
+// LooseLoad has exactly the same functionality as the Load function,
+// except it ignores nonexistent files instead of returning an error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{Loose: true}, source, others...)
+}
+
+// InsensitiveLoad has exactly the same functionality as the Load function,
+// except it forces all section and key names to be lowercased.
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// ShadowLoad has exactly the same functionality as the Load function,
+// except it allows having shadow keys.
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+	// Ignore error here; we are sure our data is good.
+ f, _ := Load([]byte(""))
+ return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new section: empty section name")
+ } else if f.options.Insensitive && name != DEFAULT_SECTION {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if inSlice(name, f.sectionList) {
+ return f.sections[name], nil
+ }
+
+ f.sectionList = append(f.sectionList, name)
+ f.sections[name] = newSection(f, name)
+ return f.sections[name], nil
+}
+
+// NewRawSection creates a new section with an unparseable body.
+func (f *File) NewRawSection(name, body string) (*Section, error) {
+ section, err := f.NewSection(name)
+ if err != nil {
+ return nil, err
+ }
+
+ section.isRawSection = true
+ section.rawBody = body
+ return section, nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+ for _, name := range names {
+ if _, err = f.NewSection(name); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetSection returns section by given name.
+func (f *File) GetSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ } else if f.options.Insensitive {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sec := f.sections[name]
+ if sec == nil {
+ return nil, fmt.Errorf("section '%s' does not exist", name)
+ }
+ return sec, nil
+}
+
+// Section assumes named section exists and returns a zero-value when not.
+func (f *File) Section(name string) *Section {
+ sec, err := f.GetSection(name)
+ if err != nil {
+ // Note: It's OK here because the only possible error is empty section name,
+ // but if it's empty, this piece of code won't be executed.
+ sec, _ = f.NewSection(name)
+ return sec
+ }
+ return sec
+}
+
+// Sections returns a list of all sections.
+func (f *File) Sections() []*Section {
+ sections := make([]*Section, len(f.sectionList))
+ for i := range f.sectionList {
+ sections[i] = f.Section(f.sectionList[i])
+ }
+ return sections
+}
+
+// ChildSections returns a list of child sections of given section name.
+func (f *File) ChildSections(name string) []*Section {
+ return f.Section(name).ChildSections()
+}
+
+// SectionStrings returns list of section names.
+func (f *File) SectionStrings() []string {
+ list := make([]string, len(f.sectionList))
+ copy(list, f.sectionList)
+ return list
+}
+
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) {
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ }
+
+ for i, s := range f.sectionList {
+ if s == name {
+ f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+ delete(f.sections, name)
+ return
+ }
+ }
+}
+
+func (f *File) reload(s dataSource) error {
+ r, err := s.ReadCloser()
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+
+ return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+ for _, s := range f.dataSources {
+ if err = f.reload(s); err != nil {
+ // In loose mode, we create an empty default section for nonexistent files.
+ if os.IsNotExist(err) && f.options.Loose {
+ f.parse(bytes.NewBuffer(nil))
+ continue
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+ ds, err := parseDataSource(source)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ for _, s := range others {
+ ds, err = parseDataSource(s)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ }
+ return f.Reload()
+}
+
+// WriteToIndent writes content into io.Writer with the given indentation.
+// If PrettyFormat has been set to true,
+// it will align "=" signs with spaces under each section.
+func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
+ equalSign := "="
+ if PrettyFormat {
+ equalSign = " = "
+ }
+
+ // Use buffer to make sure target is safe until finish encoding.
+ buf := bytes.NewBuffer(nil)
+ for i, sname := range f.sectionList {
+ sec := f.Section(sname)
+ if len(sec.Comment) > 0 {
+ if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
+ sec.Comment = "; " + sec.Comment
+ }
+ if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
+ return 0, err
+ }
+ }
+
+ if i > 0 || DefaultHeader {
+ if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+ return 0, err
+ }
+ } else {
+ // Write nothing if default section is empty
+ if len(sec.keyList) == 0 {
+ continue
+ }
+ }
+
+ if sec.isRawSection {
+ if _, err = buf.WriteString(sec.rawBody); err != nil {
+ return 0, err
+ }
+ continue
+ }
+
+ // Count and generate alignment length and buffer spaces using the
+		// longest key. Keys may be modified if they contain certain characters so
+ // we need to take that into account in our calculation.
+ alignLength := 0
+ if PrettyFormat {
+ for _, kname := range sec.keyList {
+ keyLength := len(kname)
+ // First case will surround key by ` and second by """
+ if strings.ContainsAny(kname, "\"=:") {
+ keyLength += 2
+ } else if strings.Contains(kname, "`") {
+ keyLength += 6
+ }
+
+ if keyLength > alignLength {
+ alignLength = keyLength
+ }
+ }
+ }
+ alignSpaces := bytes.Repeat([]byte(" "), alignLength)
+
+ KEY_LIST:
+ for _, kname := range sec.keyList {
+ key := sec.Key(kname)
+ if len(key.Comment) > 0 {
+ if len(indent) > 0 && sname != DEFAULT_SECTION {
+ buf.WriteString(indent)
+ }
+ if key.Comment[0] != '#' && key.Comment[0] != ';' {
+ key.Comment = "; " + key.Comment
+ }
+ if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
+ return 0, err
+ }
+ }
+
+ if len(indent) > 0 && sname != DEFAULT_SECTION {
+ buf.WriteString(indent)
+ }
+
+ switch {
+ case key.isAutoIncrement:
+ kname = "-"
+ case strings.ContainsAny(kname, "\"=:"):
+ kname = "`" + kname + "`"
+ case strings.Contains(kname, "`"):
+ kname = `"""` + kname + `"""`
+ }
+
+ for _, val := range key.ValueWithShadows() {
+ if _, err = buf.WriteString(kname); err != nil {
+ return 0, err
+ }
+
+ if key.isBooleanType {
+ if kname != sec.keyList[len(sec.keyList)-1] {
+ buf.WriteString(LineBreak)
+ }
+ continue KEY_LIST
+ }
+
+ // Write out alignment spaces before "=" sign
+ if PrettyFormat {
+ buf.Write(alignSpaces[:alignLength-len(kname)])
+ }
+
+ // In case key value contains "\n", "`", "\"", "#" or ";"
+ if strings.ContainsAny(val, "\n`") {
+ val = `"""` + val + `"""`
+ } else if strings.ContainsAny(val, "#;") {
+ val = "`" + val + "`"
+ }
+ if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ // Put a line between sections
+ if _, err = buf.WriteString(LineBreak); err != nil {
+ return 0, err
+ }
+ }
+
+ return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+ return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to the file system with the given value indentation.
+func (f *File) SaveToIndent(filename, indent string) error {
+	// Note: Because os.Create truncates the target file,
+	// it's safer to write to a temporary location and rename when done.
+ tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
+ defer os.Remove(tmpPath)
+
+ fw, err := os.Create(tmpPath)
+ if err != nil {
+ return err
+ }
+
+ if _, err = f.WriteToIndent(fw, indent); err != nil {
+ fw.Close()
+ return err
+ }
+ fw.Close()
+
+ // Remove old file and rename the new one.
+ os.Remove(filename)
+ return os.Rename(tmpPath, filename)
+}
+
+// SaveTo writes content to file system.
+func (f *File) SaveTo(filename string) error {
+ return f.SaveToIndent(filename, "")
+}
diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go
new file mode 100644
index 00000000..838356af
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/key.go
@@ -0,0 +1,699 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+ s *Section
+ name string
+ value string
+ isAutoIncrement bool
+ isBooleanType bool
+
+ isShadow bool
+ shadows []*Key
+
+ Comment string
+}
+
+// newKey simply returns a key object with the given values.
+func newKey(s *Section, name, val string) *Key {
+ return &Key{
+ s: s,
+ name: name,
+ value: val,
+ }
+}
+
+func (k *Key) addShadow(val string) error {
+ if k.isShadow {
+ return errors.New("cannot add shadow to another shadow key")
+ } else if k.isAutoIncrement || k.isBooleanType {
+ return errors.New("cannot add shadow to auto-increment or boolean key")
+ }
+
+ shadow := newKey(k.s, k.name, val)
+ shadow.isShadow = true
+ k.shadows = append(k.shadows, shadow)
+ return nil
+}
+
+// AddShadow adds a new shadow key to itself.
+func (k *Key) AddShadow(val string) error {
+ if !k.s.f.options.AllowShadows {
+ return errors.New("shadow key is not allowed")
+ }
+ return k.addShadow(val)
+}
+
+// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
+type ValueMapper func(string) string
+
+// Name returns name of key.
+func (k *Key) Name() string {
+ return k.name
+}
+
+// Value returns raw value of key for performance purpose.
+func (k *Key) Value() string {
+ return k.value
+}
+
+// ValueWithShadows returns raw values of key and its shadows if any.
+func (k *Key) ValueWithShadows() []string {
+ if len(k.shadows) == 0 {
+ return []string{k.value}
+ }
+ vals := make([]string, len(k.shadows)+1)
+ vals[0] = k.value
+ for i := range k.shadows {
+ vals[i+1] = k.shadows[i].value
+ }
+ return vals
+}
+
+// transformValue takes a raw value and transforms it to its final string.
+func (k *Key) transformValue(val string) string {
+ if k.s.f.ValueMapper != nil {
+ val = k.s.f.ValueMapper(val)
+ }
+
+	// Fail fast if there is no indicator character for a recursive value
+ if !strings.Contains(val, "%") {
+ return val
+ }
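+	// Iterate at most _DEPTH_VALUES times so cyclic or deeply nested references terminate.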
+ for i := 0; i < _DEPTH_VALUES; i++ {
+ vr := varPattern.FindString(val)
+ if len(vr) == 0 {
+ break
+ }
+
+ // Take off leading '%(' and trailing ')s'.
+ noption := strings.TrimLeft(vr, "%(")
+ noption = strings.TrimRight(noption, ")s")
+
+ // Search in the same section.
+ nk, err := k.s.GetKey(noption)
+ if err != nil {
+ // Search again in default section.
+ nk, _ = k.s.f.Section("").GetKey(noption)
+ }
+
+		// Substitute the variable reference with the resolved value.
+ val = strings.Replace(val, vr, nk.value, -1)
+ }
+ return val
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+ return k.transformValue(k.value)
+}
+
+// Validate accepts a validation function which can
+// return a modified result as the key value.
+func (k *Key) Validate(fn func(string) string) string {
+ return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+ switch str {
+ case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+ return true, nil
+ case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+ return false, nil
+ }
+ return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+ return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+ return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+ return strconv.Atoi(k.String())
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+ return strconv.ParseInt(k.String(), 10, 64)
+}
+
+// Uint returns uint type value.
+func (k *Key) Uint() (uint, error) {
+ u, e := strconv.ParseUint(k.String(), 10, 64)
+ return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+ return strconv.ParseUint(k.String(), 10, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+ return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+ return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+ return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string {
+ val := k.String()
+ if len(val) == 0 {
+ k.value = defaultVal
+ return defaultVal
+ }
+ return val
+}
+
+// MustBool always returns value without error,
+// it returns false if error occurs.
+func (k *Key) MustBool(defaultVal ...bool) bool {
+ val, err := k.Bool()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatBool(defaultVal[0])
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustFloat64 always returns value without error,
+// it returns 0.0 if error occurs.
+func (k *Key) MustFloat64(defaultVal ...float64) float64 {
+ val, err := k.Float64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt(defaultVal ...int) int {
+ val, err := k.Int()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt64(defaultVal ...int64) int64 {
+ val, err := k.Int64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatInt(defaultVal[0], 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint(defaultVal ...uint) uint {
+ val, err := k.Uint()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
+ val, err := k.Uint64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatUint(defaultVal[0], 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustDuration always returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
+ val, err := k.Duration()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = defaultVal[0].String()
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTimeFormat always parses with given format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
+ val, err := k.TimeFormat(format)
+ if len(defaultVal) > 0 && err != nil {
+ k.value = defaultVal[0].Format(format)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTime always parses with RFC3339 format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
+ return k.MustTimeFormat(time.RFC3339, defaultVal...)
+}
+
+// In always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) In(defaultVal string, candidates []string) string {
+ val := k.String()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InFloat64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
+ val := k.MustFloat64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt(defaultVal int, candidates []int) int {
+ val := k.MustInt()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
+ val := k.MustInt64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
+ val := k.MustUint()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
+ val := k.MustUint64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTimeFormat always parses with given format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTime always parses with RFC3339 format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
+ return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
+}
+
+// RangeFloat64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
+ val := k.MustFloat64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt(defaultVal, min, max int) int {
+ val := k.MustInt()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
+ val := k.MustInt64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTimeFormat checks if value with given format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTime checks if value with RFC3339 format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
+ return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
+}
+
+// Strings returns list of string divided by given delimiter.
+func (k *Key) Strings(delim string) []string {
+ str := k.String()
+ if len(str) == 0 {
+ return []string{}
+ }
+
+ vals := strings.Split(str, delim)
+ for i := range vals {
+ // vals[i] = k.transformValue(strings.TrimSpace(vals[i]))
+ vals[i] = strings.TrimSpace(vals[i])
+ }
+ return vals
+}
+
+// StringsWithShadows returns list of string divided by given delimiter.
+// Shadows will also be appended if any.
+func (k *Key) StringsWithShadows(delim string) []string {
+ vals := k.ValueWithShadows()
+ results := make([]string, 0, len(vals)*2)
+ for i := range vals {
+		if len(vals[i]) == 0 {
+ continue
+ }
+
+ results = append(results, strings.Split(vals[i], delim)...)
+ }
+
+ for i := range results {
+ results[i] = k.transformValue(strings.TrimSpace(results[i]))
+ }
+ return results
+}
+
+// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Float64s(delim string) []float64 {
+ vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Ints(delim string) []int {
+ vals, _ := k.parseInts(k.Strings(delim), true, false)
+ return vals
+}
+
+// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Int64s(delim string) []int64 {
+ vals, _ := k.parseInt64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uints(delim string) []uint {
+ vals, _ := k.parseUints(k.Strings(delim), true, false)
+ return vals
+}
+
+// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uint64s(delim string) []uint64 {
+ vals, _ := k.parseUint64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) TimesFormat(format, delim string) []time.Time {
+ vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
+ return vals
+}
+
+// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) Times(delim string) []time.Time {
+ return k.TimesFormat(time.RFC3339, delim)
+}
+
+// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
+// it will not be included to result list.
+func (k *Key) ValidFloat64s(delim string) []float64 {
+ vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
+// not be included to result list.
+func (k *Key) ValidInts(delim string) []int {
+ vals, _ := k.parseInts(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
+// then it will not be included to result list.
+func (k *Key) ValidInt64s(delim string) []int64 {
+ vals, _ := k.parseInt64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
+// then it will not be included to result list.
+func (k *Key) ValidUints(delim string) []uint {
+ vals, _ := k.parseUints(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
+// integer, then it will not be included to result list.
+func (k *Key) ValidUint64s(delim string) []uint64 {
+ vals, _ := k.parseUint64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
+ vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimes(delim string) []time.Time {
+ return k.ValidTimesFormat(time.RFC3339, delim)
+}
+
+// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
+ return k.parseFloat64s(k.Strings(delim), false, true)
+}
+
+// StrictInts returns list of int divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInts(delim string) ([]int, error) {
+ return k.parseInts(k.Strings(delim), false, true)
+}
+
+// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInt64s(delim string) ([]int64, error) {
+ return k.parseInt64s(k.Strings(delim), false, true)
+}
+
+// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUints(delim string) ([]uint, error) {
+ return k.parseUints(k.Strings(delim), false, true)
+}
+
+// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
+ return k.parseUint64s(k.Strings(delim), false, true)
+}
+
+// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
+ return k.parseTimesFormat(format, k.Strings(delim), false, true)
+}
+
+// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
+ return k.StrictTimesFormat(time.RFC3339, delim)
+}
+
+// parseFloat64s transforms strings to float64s.
+func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
+ vals := make([]float64, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseFloat(str, 64)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// parseInts transforms strings to ints.
+func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
+ vals := make([]int, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.Atoi(str)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// parseInt64s transforms strings to int64s.
+func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
+ vals := make([]int64, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseInt(str, 10, 64)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// parseUints transforms strings to uints.
+func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
+ vals := make([]uint, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseUint(str, 10, 0)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, uint(val))
+ }
+ }
+ return vals, nil
+}
+
+// parseUint64s transforms strings to uint64s.
+func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
+ vals := make([]uint64, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseUint(str, 10, 64)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// parseTimesFormat transforms strings to times in given format.
+func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
+ vals := make([]time.Time, 0, len(strs))
+ for _, str := range strs {
+ val, err := time.Parse(format, str)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// SetValue changes key value.
+func (k *Key) SetValue(v string) {
+ if k.s.f.BlockMode {
+ k.s.f.lock.Lock()
+ defer k.s.f.lock.Unlock()
+ }
+
+ k.value = v
+ k.s.keysHash[k.name] = v
+}
diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go
new file mode 100644
index 00000000..6c0b1074
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/parser.go
@@ -0,0 +1,361 @@
+// Copyright 2015 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+type tokenType int
+
+const (
+ _TOKEN_INVALID tokenType = iota
+ _TOKEN_COMMENT
+ _TOKEN_SECTION
+ _TOKEN_KEY
+)
+
+type parser struct {
+ buf *bufio.Reader
+ isEOF bool
+ count int
+ comment *bytes.Buffer
+}
+
+func newParser(r io.Reader) *parser {
+ return &parser{
+ buf: bufio.NewReader(r),
+ count: 1,
+ comment: &bytes.Buffer{},
+ }
+}
+
+// BOM handles the BOM (byte order mark) header of UTF-8, UTF-16 LE and UTF-16 BE encodings.
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
+func (p *parser) BOM() error {
+ mask, err := p.buf.Peek(2)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 2 {
+ return nil
+ }
+
+ switch {
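+	// 0xFE 0xFF is the UTF-16 BE BOM, 0xFF 0xFE the UTF-16 LE BOM (both 2 bytes);
+	// 0xEF 0xBB 0xBF (239 187 191) is the UTF-8 BOM (3 bytes).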
+ case mask[0] == 254 && mask[1] == 255:
+ fallthrough
+ case mask[0] == 255 && mask[1] == 254:
+ p.buf.Read(mask)
+ case mask[0] == 239 && mask[1] == 187:
+ mask, err := p.buf.Peek(3)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 3 {
+ return nil
+ }
+ if mask[2] == 191 {
+ p.buf.Read(mask)
+ }
+ }
+ return nil
+}
+
+func (p *parser) readUntil(delim byte) ([]byte, error) {
+ data, err := p.buf.ReadBytes(delim)
+ if err != nil {
+ if err == io.EOF {
+ p.isEOF = true
+ } else {
+ return nil, err
+ }
+ }
+ return data, nil
+}
+
+func cleanComment(in []byte) ([]byte, bool) {
+ i := bytes.IndexAny(in, "#;")
+ if i == -1 {
+ return nil, false
+ }
+ return in[i:], true
+}
+
+func readKeyName(in []byte) (string, int, error) {
+ line := string(in)
+
+ // Check if key name surrounded by quotes.
+ var keyQuote string
+ if line[0] == '"' {
+ if len(line) > 6 && string(line[0:3]) == `"""` {
+ keyQuote = `"""`
+ } else {
+ keyQuote = `"`
+ }
+ } else if line[0] == '`' {
+ keyQuote = "`"
+ }
+
+ // Get out key name
+ endIdx := -1
+ if len(keyQuote) > 0 {
+ startIdx := len(keyQuote)
+ // FIXME: fail case -> """"""name"""=value
+ pos := strings.Index(line[startIdx:], keyQuote)
+ if pos == -1 {
+ return "", -1, fmt.Errorf("missing closing key quote: %s", line)
+ }
+ pos += startIdx
+
+ // Find key-value delimiter
+ i := strings.IndexAny(line[pos+startIdx:], "=:")
+ if i < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ endIdx = pos + i
+ return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
+ }
+
+ endIdx = strings.IndexAny(line, "=:")
+ if endIdx < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
+}
+
+func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := string(data)
+
+ pos := strings.LastIndex(next, valQuote)
+ if pos > -1 {
+ val += next[:pos]
+
+ comment, has := cleanComment([]byte(next[pos:]))
+ if has {
+ p.comment.Write(bytes.TrimSpace(comment))
+ }
+ break
+ }
+ val += next
+ if p.isEOF {
+ return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
+ }
+ }
+ return val, nil
+}
+
+func (p *parser) readContinuationLines(val string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := strings.TrimSpace(string(data))
+
+ if len(next) == 0 {
+ break
+ }
+ val += next
+ if val[len(val)-1] != '\\' {
+ break
+ }
+ val = val[:len(val)-1]
+ }
+ return val, nil
+}
+
+// hasSurroundedQuote checks that the first and last characters
+// are the given quote (\" or \') and that no other part of the
+// string contains the same kind of quote.
+func hasSurroundedQuote(in string, quote byte) bool {
+ return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote &&
+ strings.IndexByte(in[1:], quote) == len(in)-2
+}
+
+func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) {
+ line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
+ if len(line) == 0 {
+ return "", nil
+ }
+
+ var valQuote string
+ if len(line) > 3 && string(line[0:3]) == `"""` {
+ valQuote = `"""`
+ } else if line[0] == '`' {
+ valQuote = "`"
+ }
+
+ if len(valQuote) > 0 {
+ startIdx := len(valQuote)
+ pos := strings.LastIndex(line[startIdx:], valQuote)
+ // Check for multi-line value
+ if pos == -1 {
+ return p.readMultilines(line, line[startIdx:], valQuote)
+ }
+
+ return line[startIdx : pos+startIdx], nil
+ }
+
+ // Won't be able to reach here if value only contains whitespace
+ line = strings.TrimSpace(line)
+
+ // Check continuation lines when desired
+ if !ignoreContinuation && line[len(line)-1] == '\\' {
+ return p.readContinuationLines(line[:len(line)-1])
+ }
+
+ // Check if ignore inline comment
+ if !ignoreInlineComment {
+ i := strings.IndexAny(line, "#;")
+ if i > -1 {
+ p.comment.WriteString(line[i:])
+ line = strings.TrimSpace(line[:i])
+ }
+ }
+
+ // Trim single quotes
+ if hasSurroundedQuote(line, '\'') ||
+ hasSurroundedQuote(line, '"') {
+ line = line[1 : len(line)-1]
+ }
+ return line, nil
+}
+
+// parse parses data through an io.Reader.
+func (f *File) parse(reader io.Reader) (err error) {
+ p := newParser(reader)
+ if err = p.BOM(); err != nil {
+ return fmt.Errorf("BOM: %v", err)
+ }
+
+ // Ignore error because default section name is never empty string.
+ section, _ := f.NewSection(DEFAULT_SECTION)
+
+ var line []byte
+ var inUnparseableSection bool
+ for !p.isEOF {
+ line, err = p.readUntil('\n')
+ if err != nil {
+ return err
+ }
+
+ line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+ if len(line) == 0 {
+ continue
+ }
+
+ // Comments
+ if line[0] == '#' || line[0] == ';' {
+			// Note: we do not care about the trailing line break here,
+			// since it is needed when appending a second comment line;
+			// it is trimmed once at the end when the value is set.
+ p.comment.Write(line)
+ continue
+ }
+
+ // Section
+ if line[0] == '[' {
+ // Read to the next ']' (TODO: support quoted strings)
+ // TODO(unknwon): use LastIndexByte when stop supporting Go1.4
+ closeIdx := bytes.LastIndex(line, []byte("]"))
+ if closeIdx == -1 {
+ return fmt.Errorf("unclosed section: %s", line)
+ }
+
+ name := string(line[1:closeIdx])
+ section, err = f.NewSection(name)
+ if err != nil {
+ return err
+ }
+
+ comment, has := cleanComment(line[closeIdx+1:])
+ if has {
+ p.comment.Write(comment)
+ }
+
+ section.Comment = strings.TrimSpace(p.comment.String())
+
+		// Reset auto-counter and comments
+ p.comment.Reset()
+ p.count = 1
+
+ inUnparseableSection = false
+ for i := range f.options.UnparseableSections {
+ if f.options.UnparseableSections[i] == name ||
+ (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
+ inUnparseableSection = true
+ continue
+ }
+ }
+ continue
+ }
+
+ if inUnparseableSection {
+ section.isRawSection = true
+ section.rawBody += string(line)
+ continue
+ }
+
+ kname, offset, err := readKeyName(line)
+ if err != nil {
+ // Treat as boolean key when desired, and whole line is key name.
+ if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
+ kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
+ if err != nil {
+ return err
+ }
+ key, err := section.NewBooleanKey(kname)
+ if err != nil {
+ return err
+ }
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ continue
+ }
+ return err
+ }
+
+ // Auto increment.
+ isAutoIncr := false
+ if kname == "-" {
+ isAutoIncr = true
+ kname = "#" + strconv.Itoa(p.count)
+ p.count++
+ }
+
+ value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
+ if err != nil {
+ return err
+ }
+
+ key, err := section.NewKey(kname, value)
+ if err != nil {
+ return err
+ }
+ key.isAutoIncrement = isAutoIncr
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go
new file mode 100644
index 00000000..94f7375e
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/section.go
@@ -0,0 +1,248 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// Section represents a config section.
+type Section struct {
+ f *File
+ Comment string
+ name string
+ keys map[string]*Key
+ keyList []string
+ keysHash map[string]string
+
+ isRawSection bool
+ rawBody string
+}
+
+func newSection(f *File, name string) *Section {
+ return &Section{
+ f: f,
+ name: name,
+ keys: make(map[string]*Key),
+ keyList: make([]string, 0, 10),
+ keysHash: make(map[string]string),
+ }
+}
+
+// Name returns name of Section.
+func (s *Section) Name() string {
+ return s.name
+}
+
+// Body returns rawBody of Section if the section was marked as unparseable.
+// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
+func (s *Section) Body() string {
+ return strings.TrimSpace(s.rawBody)
+}
+
+// NewKey creates a new key to given section.
+func (s *Section) NewKey(name, val string) (*Key, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new key: empty key name")
+ } else if s.f.options.Insensitive {
+ name = strings.ToLower(name)
+ }
+
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ if inSlice(name, s.keyList) {
+ if s.f.options.AllowShadows {
+ if err := s.keys[name].addShadow(val); err != nil {
+ return nil, err
+ }
+ } else {
+ s.keys[name].value = val
+ }
+ return s.keys[name], nil
+ }
+
+ s.keyList = append(s.keyList, name)
+ s.keys[name] = newKey(s, name, val)
+ s.keysHash[name] = val
+ return s.keys[name], nil
+}
+
+// NewBooleanKey creates a new boolean type key to given section.
+func (s *Section) NewBooleanKey(name string) (*Key, error) {
+ key, err := s.NewKey(name, "true")
+ if err != nil {
+ return nil, err
+ }
+
+ key.isBooleanType = true
+ return key, nil
+}
+
+// GetKey returns key in section by given name.
+func (s *Section) GetKey(name string) (*Key, error) {
+ // FIXME: change to section level lock?
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ }
+ if s.f.options.Insensitive {
+ name = strings.ToLower(name)
+ }
+ key := s.keys[name]
+ if s.f.BlockMode {
+ s.f.lock.RUnlock()
+ }
+
+ if key == nil {
+ // Check if it is a child-section.
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, "."); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ return sec.GetKey(name)
+ } else {
+ break
+ }
+ }
+		return nil, fmt.Errorf("error when getting key of section '%s': key '%s' does not exist", s.name, name)
+ }
+ return key, nil
+}
+
+// HasKey returns true if section contains a key with given name.
+func (s *Section) HasKey(name string) bool {
+ key, _ := s.GetKey(name)
+ return key != nil
+}
+
+// Haskey is a backwards-compatible name for HasKey.
+func (s *Section) Haskey(name string) bool {
+ return s.HasKey(name)
+}
+
+// HasValue returns true if section contains given raw value.
+func (s *Section) HasValue(value string) bool {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ for _, k := range s.keys {
+ if value == k.value {
+ return true
+ }
+ }
+ return false
+}
+
+// Key assumes named Key exists in section and returns a zero-value when not.
+func (s *Section) Key(name string) *Key {
+ key, err := s.GetKey(name)
+ if err != nil {
+ // It's OK here because the only possible error is empty key name,
+ // but if it's empty, this piece of code won't be executed.
+ key, _ = s.NewKey(name, "")
+ return key
+ }
+ return key
+}
+
+// Keys returns list of keys of section.
+func (s *Section) Keys() []*Key {
+ keys := make([]*Key, len(s.keyList))
+ for i := range s.keyList {
+ keys[i] = s.Key(s.keyList[i])
+ }
+ return keys
+}
+
+// ParentKeys returns list of keys of parent section.
+func (s *Section) ParentKeys() []*Key {
+ var parentKeys []*Key
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, "."); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ parentKeys = append(parentKeys, sec.Keys()...)
+ } else {
+ break
+ }
+
+ }
+ return parentKeys
+}
+
+// KeyStrings returns list of key names of section.
+func (s *Section) KeyStrings() []string {
+ list := make([]string, len(s.keyList))
+ copy(list, s.keyList)
+ return list
+}
+
+// KeysHash returns keys hash consisting of names and values.
+func (s *Section) KeysHash() map[string]string {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ hash := map[string]string{}
+ for key, value := range s.keysHash {
+ hash[key] = value
+ }
+ return hash
+}
+
+// DeleteKey deletes a key from section.
+func (s *Section) DeleteKey(name string) {
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ for i, k := range s.keyList {
+ if k == name {
+ s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
+ delete(s.keys, name)
+ return
+ }
+ }
+}
+
+// ChildSections returns a list of child sections of current section.
+// For example, "[parent.child1]" and "[parent.child12]" are child sections
+// of section "[parent]".
+func (s *Section) ChildSections() []*Section {
+ prefix := s.name + "."
+ children := make([]*Section, 0, 3)
+ for _, name := range s.f.sectionList {
+ if strings.HasPrefix(name, prefix) {
+ children = append(children, s.f.sections[name])
+ }
+ }
+ return children
+}
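+
+// Illustrative usage sketch (not part of the upstream package; the *File
+// value cfg and the section names are hypothetical):
+//
+//	parent := cfg.Section("parent")
+//	cfg.NewSection("parent.child1")
+//	children := parent.ChildSections() // now includes the "parent.child1" section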
diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go
new file mode 100644
index 00000000..031c78b8
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/struct.go
@@ -0,0 +1,450 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+ "unicode"
+)
+
+// NameMapper represents an ini tag name mapper.
+type NameMapper func(string) string
+
+// Built-in name getters.
+var (
+ // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+ AllCapsUnderscore NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ }
+ newstr = append(newstr, unicode.ToUpper(chr))
+ }
+ return string(newstr)
+ }
+ // TitleUnderscore converts to format title_underscore.
+ TitleUnderscore NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ chr -= ('A' - 'a')
+ }
+ newstr = append(newstr, chr)
+ }
+ return string(newstr)
+ }
+)
+
+func (s *Section) parseFieldName(raw, actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ if s.f.NameMapper != nil {
+ return s.f.NameMapper(raw)
+ }
+ return raw
+}
+
+func parseDelim(actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ return ","
+}
+
+var reflectTime = reflect.TypeOf(time.Now()).Kind()
+
+// setSliceWithProperType sets proper values to slice based on its type.
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
+ var strs []string
+ if allowShadow {
+ strs = key.StringsWithShadows(delim)
+ } else {
+ strs = key.Strings(delim)
+ }
+
+ numVals := len(strs)
+ if numVals == 0 {
+ return nil
+ }
+
+ var vals interface{}
+
+ sliceOf := field.Type().Elem().Kind()
+ switch sliceOf {
+ case reflect.String:
+ vals = strs
+ case reflect.Int:
+ vals, _ = key.parseInts(strs, true, false)
+ case reflect.Int64:
+ vals, _ = key.parseInt64s(strs, true, false)
+ case reflect.Uint:
+ vals, _ = key.parseUints(strs, true, false)
+ case reflect.Uint64:
+ vals, _ = key.parseUint64s(strs, true, false)
+ case reflect.Float64:
+ vals, _ = key.parseFloat64s(strs, true, false)
+ case reflectTime:
+ vals, _ = key.parseTimesFormat(time.RFC3339, strs, true, false)
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+
+ slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+ for i := 0; i < numVals; i++ {
+ switch sliceOf {
+ case reflect.String:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
+ case reflect.Int:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
+ case reflect.Int64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
+ case reflect.Uint:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
+ case reflect.Uint64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
+ case reflect.Float64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
+ case reflectTime:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
+ }
+ }
+ field.Set(slice)
+ return nil
+}
+
+// setWithProperType sets a proper value to the field based on its type.
+// It does not return an error when parsing fails, because we want to use
+// the default value that is already assigned to the struct.
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
+ switch t.Kind() {
+ case reflect.String:
+ if len(key.String()) == 0 {
+ return nil
+ }
+ field.SetString(key.String())
+ case reflect.Bool:
+ boolVal, err := key.Bool()
+ if err != nil {
+ return nil
+ }
+ field.SetBool(boolVal)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ durationVal, err := key.Duration()
+ // Skip zero value
+ if err == nil && int(durationVal) > 0 {
+ field.Set(reflect.ValueOf(durationVal))
+ return nil
+ }
+
+ intVal, err := key.Int64()
+ if err != nil || intVal == 0 {
+ return nil
+ }
+ field.SetInt(intVal)
+ // byte is an alias for uint8, so supporting uint8 breaks support for byte
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ durationVal, err := key.Duration()
+ // Skip zero value
+ if err == nil && int(durationVal) > 0 {
+ field.Set(reflect.ValueOf(durationVal))
+ return nil
+ }
+
+ uintVal, err := key.Uint64()
+ if err != nil {
+ return nil
+ }
+ field.SetUint(uintVal)
+
+ case reflect.Float32, reflect.Float64:
+ floatVal, err := key.Float64()
+ if err != nil {
+ return nil
+ }
+ field.SetFloat(floatVal)
+ case reflectTime:
+ timeVal, err := key.Time()
+ if err != nil {
+ return nil
+ }
+ field.Set(reflect.ValueOf(timeVal))
+ case reflect.Slice:
+ return setSliceWithProperType(key, field, delim, allowShadow)
+ default:
+ return fmt.Errorf("unsupported type '%s'", t)
+ }
+ return nil
+}
+
+func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) {
+ opts := strings.SplitN(tag, ",", 3)
+ rawName = opts[0]
+ if len(opts) > 1 {
+ omitEmpty = opts[1] == "omitempty"
+ }
+ if len(opts) > 2 {
+ allowShadow = opts[2] == "allowshadow"
+ }
+ return rawName, omitEmpty, allowShadow
+}
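+
+// For illustration (not part of the upstream package): a struct tag such as
+// `ini:"name,omitempty,allowshadow"` parses to rawName="name",
+// omitEmpty=true, allowShadow=true.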
+
+func (s *Section) mapTo(val reflect.Value) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ rawName, _, allowShadow := parseTagOptions(tag)
+ fieldName := s.parseFieldName(tpField.Name, rawName)
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
+ isStruct := tpField.Type.Kind() == reflect.Struct
+ if isAnonymous {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+
+ if isAnonymous || isStruct {
+ if sec, err := s.f.GetSection(fieldName); err == nil {
+ if err = sec.mapTo(field); err != nil {
+ return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+ }
+ continue
+ }
+ }
+
+ if key, err := s.GetKey(fieldName); err == nil {
+ delim := parseDelim(tpField.Tag.Get("delim"))
+ if err = setWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
+ return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+ }
+ }
+ }
+ return nil
+}
+
+// MapTo maps section to given struct.
+func (s *Section) MapTo(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("cannot map to non-pointer struct")
+ }
+
+ return s.mapTo(val)
+}
+
+// MapTo maps file to given struct.
+func (f *File) MapTo(v interface{}) error {
+ return f.Section("").MapTo(v)
+}
+
+// MapToWithMapper maps data sources to the given struct with a name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.MapTo(v)
+}
+
+// MapTo maps data sources to given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+ return MapToWithMapper(v, nil, source, others...)
+}
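+
+// Illustrative sketch (not part of the upstream package; the Server struct,
+// its tags, and the INI content are hypothetical):
+//
+//	type Server struct {
+//		Host string `ini:"host"`
+//		Port int    `ini:"port"`
+//	}
+//
+//	var s Server
+//	err := MapTo(&s, []byte("host = 127.0.0.1\nport = 8080"))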
+
+// reflectSliceWithProperType does the opposite of setSliceWithProperType.
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
+ slice := field.Slice(0, field.Len())
+ if field.Len() == 0 {
+ return nil
+ }
+
+ var buf bytes.Buffer
+ sliceOf := field.Type().Elem().Kind()
+ for i := 0; i < field.Len(); i++ {
+ switch sliceOf {
+ case reflect.String:
+ buf.WriteString(slice.Index(i).String())
+ case reflect.Int, reflect.Int64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
+ case reflect.Uint, reflect.Uint64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
+ case reflect.Float64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
+ case reflectTime:
+ buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+ buf.WriteString(delim)
+ }
+ key.SetValue(buf.String()[:buf.Len()-1])
+ return nil
+}
+
+// reflectWithProperType does the opposite of setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+ switch t.Kind() {
+ case reflect.String:
+ key.SetValue(field.String())
+ case reflect.Bool:
+ key.SetValue(fmt.Sprint(field.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ key.SetValue(fmt.Sprint(field.Int()))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ key.SetValue(fmt.Sprint(field.Uint()))
+ case reflect.Float32, reflect.Float64:
+ key.SetValue(fmt.Sprint(field.Float()))
+ case reflectTime:
+ key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
+ case reflect.Slice:
+ return reflectSliceWithProperType(key, field, delim)
+ default:
+ return fmt.Errorf("unsupported type '%s'", t)
+ }
+ return nil
+}
+
+// CR: copied from encoding/json/encode.go, with modifications to support time.Time.
+// TODO: add more test coverage.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflectTime:
+ return v.Interface().(time.Time).IsZero()
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ opts := strings.SplitN(tag, ",", 2)
+ if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
+ continue
+ }
+
+ fieldName := s.parseFieldName(tpField.Name, opts[0])
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+ (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
+ // Note: the only possible error here is that the section does not exist.
+ sec, err := s.f.GetSection(fieldName)
+ if err != nil {
+ // Note: fieldName can never be empty here, so the error can be ignored.
+ sec, _ = s.f.NewSection(fieldName)
+ }
+ if err = sec.reflectFrom(field); err != nil {
+ return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+ }
+ continue
+ }
+
+ // Note: same reasoning as for the section above.
+ key, err := s.GetKey(fieldName)
+ if err != nil {
+ key, _ = s.NewKey(fieldName, "")
+ }
+ if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+ return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+ }
+
+ }
+ return nil
+}
+
+// ReflectFrom reflects a section from the given struct.
+func (s *Section) ReflectFrom(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("cannot reflect from non-pointer struct")
+ }
+
+ return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+ return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from the given struct with a name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+ cfg.NameMapper = mapper
+ return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
+func ReflectFrom(cfg *File, v interface{}) error {
+ return ReflectFromWithMapper(cfg, v, nil)
+}
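+
+// Illustrative sketch (not part of the upstream package; Server is the
+// hypothetical struct from the MapTo sketch above):
+//
+//	cfg := Empty()
+//	err := ReflectFrom(cfg, &Server{Host: "127.0.0.1", Port: 8080})
+//	// cfg's default section now holds the "host" and "port" keys.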
diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE
new file mode 100644
index 00000000..c33dcc7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md
new file mode 100644
index 00000000..1c95f597
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/README.md
@@ -0,0 +1,89 @@
+# errwrap
+
+`errwrap` is a package for Go that formalizes the pattern of wrapping errors
+and checking if an error contains another error.
+
+There is a common pattern in Go of taking a returned `error` value and
+then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
+with this pattern is that you completely lose the original `error` structure.
+
+Arguably the _correct_ approach is that you should make a custom structure
+implementing the `error` interface, and have the original error as a field
+on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
+This is a good approach, but you have to know the entire chain of possible
+rewrappings, when you might only care about one.
+
+`errwrap` formalizes this pattern (it doesn't matter what approach you use
+above) by giving a single interface for wrapping errors, checking if a specific
+error is wrapped, and extracting that error.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/errwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/errwrap
+
+## Usage
+
+#### Basic Usage
+
+Below is a very basic example of its usage:
+
+```go
+// A function that always returns an error, but wraps it, like a real
+// function might.
+func tryOpen() error {
+ _, err := os.Open("/i/dont/exist")
+ if err != nil {
+ return errwrap.Wrapf("Doesn't exist: {{err}}", err)
+ }
+
+ return nil
+}
+
+func main() {
+ err := tryOpen()
+
+ // We can use the Contains helpers to check if an error contains
+ // another error. It is safe to do this with a nil error, or with
+ // an error that doesn't even use the errwrap package.
+ if errwrap.Contains(err, "does not exist") {
+ // Do something
+ }
+ if errwrap.ContainsType(err, new(os.PathError)) {
+ // Do something
+ }
+
+ // Or we can use the associated `Get` functions to just extract
+ // a specific error. This would return nil if that specific error doesn't
+ // exist.
+ perr := errwrap.GetType(err, new(os.PathError))
+}
+```
+
+#### Custom Types
+
+If you're already making custom types that properly wrap errors, then
+you can get all the functionality of `errwrap.Contains` and such by
+implementing the `Wrapper` interface with just one function. Example:
+
+```go
+type AppError struct {
+ Code ErrorCode
+ Err error
+}
+
+func (e *AppError) WrappedErrors() []error {
+ return []error{e.Err}
+}
+```
+
+Now this works:
+
+```go
+err := &AppError{Err: fmt.Errorf("an error")}
+if errwrap.ContainsType(err, fmt.Errorf("")) {
+ // This will work!
+}
+```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
new file mode 100644
index 00000000..a733bef1
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/errwrap.go
@@ -0,0 +1,169 @@
+// Package errwrap implements methods to formalize error wrapping in Go.
+//
+// All of the top-level functions that take an `error` are built to be able
+// to take any error, not just wrapped errors. This allows you to use errwrap
+// without having to type-check and type-cast everywhere.
+package errwrap
+
+import (
+ "errors"
+ "reflect"
+ "strings"
+)
+
+// WalkFunc is the callback called for Walk.
+type WalkFunc func(error)
+
+// Wrapper is an interface that can be implemented by custom types to
+// have all the Contains, Get, etc. functions in errwrap work.
+//
+// When Walk reaches a Wrapper, it will call the callback for every
+// wrapped error in addition to the wrapper itself. Since all the top-level
+// functions in errwrap use Walk, this means that all those functions work
+// with your custom type.
+type Wrapper interface {
+ WrappedErrors() []error
+}
+
+// Wrap defines that outer wraps inner, returning an error type that
+// can be cleanly used with the other methods in this package, such as
+// Contains, GetAll, etc.
+//
+// This function won't modify the error message at all (the outer message
+// will be used).
+func Wrap(outer, inner error) error {
+ return &wrappedError{
+ Outer: outer,
+ Inner: inner,
+ }
+}
+
+// Wrapf wraps an error with a formatting message. This is similar to using
+// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
+// errors, you should replace it with this.
+//
+// format is the format of the error message. The string '{{err}}' will
+// be replaced with the original error message.
+func Wrapf(format string, err error) error {
+ outerMsg := "<nil>"
+ if err != nil {
+ outerMsg = err.Error()
+ }
+
+ outer := errors.New(strings.Replace(
+ format, "{{err}}", outerMsg, -1))
+
+ return Wrap(outer, err)
+}
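+
+// Illustrative sketch (not part of the upstream package; the messages are
+// hypothetical):
+//
+//	inner := errors.New("connection refused")
+//	err := Wrapf("dial failed: {{err}}", inner)
+//	// err.Error() == "dial failed: connection refused"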
+
+// Contains checks if the given error contains an error with the
+// message msg. If err is not a wrapped error, this will always return
+// false unless the error itself happens to match this msg.
+func Contains(err error, msg string) bool {
+ return len(GetAll(err, msg)) > 0
+}
+
+// ContainsType checks if the given error contains an error with
+// the same concrete type as v. If err is not a wrapped error, this will
+// check the err itself.
+func ContainsType(err error, v interface{}) bool {
+ return len(GetAllType(err, v)) > 0
+}
+
+// Get is the same as GetAll but returns the deepest matching error.
+func Get(err error, msg string) error {
+ es := GetAll(err, msg)
+ if len(es) > 0 {
+ return es[len(es)-1]
+ }
+
+ return nil
+}
+
+// GetType is the same as GetAllType but returns the deepest matching error.
+func GetType(err error, v interface{}) error {
+ es := GetAllType(err, v)
+ if len(es) > 0 {
+ return es[len(es)-1]
+ }
+
+ return nil
+}
+
+// GetAll gets all the errors that might be wrapped in err with the
+// given message. The order of the errors is such that the outermost
+// matching error (the most recent wrap) is index zero, and so on.
+func GetAll(err error, msg string) []error {
+ var result []error
+
+ Walk(err, func(err error) {
+ if err.Error() == msg {
+ result = append(result, err)
+ }
+ })
+
+ return result
+}
+
+// GetAllType gets all the errors that are the same type as v.
+//
+// The order of the return value is the same as described in GetAll.
+func GetAllType(err error, v interface{}) []error {
+ var result []error
+
+ var search string
+ if v != nil {
+ search = reflect.TypeOf(v).String()
+ }
+ Walk(err, func(err error) {
+ var needle string
+ if err != nil {
+ needle = reflect.TypeOf(err).String()
+ }
+
+ if needle == search {
+ result = append(result, err)
+ }
+ })
+
+ return result
+}
+
+// Walk walks all the wrapped errors in err and calls the callback. If
+// err isn't a wrapped error, this will be called once for err. If err
+// is a wrapped error, the callback will be called for both the wrapper
+// that implements error as well as the wrapped error itself.
+func Walk(err error, cb WalkFunc) {
+ if err == nil {
+ return
+ }
+
+ switch e := err.(type) {
+ case *wrappedError:
+ cb(e.Outer)
+ Walk(e.Inner, cb)
+ case Wrapper:
+ cb(err)
+
+ for _, err := range e.WrappedErrors() {
+ Walk(err, cb)
+ }
+ default:
+ cb(err)
+ }
+}
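+
+// Illustrative sketch (not part of the upstream package): collecting every
+// message in a wrapped chain with Walk.
+//
+//	var msgs []string
+//	Walk(err, func(e error) { msgs = append(msgs, e.Error()) })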
+
+// wrappedError is an implementation of error that has both the
+// outer and inner errors.
+type wrappedError struct {
+ Outer error
+ Inner error
+}
+
+func (w *wrappedError) Error() string {
+ return w.Outer.Error()
+}
+
+func (w *wrappedError) WrappedErrors() []error {
+ return []error{w.Outer, w.Inner}
+}
diff --git a/vendor/github.com/hashicorp/go-getter/LICENSE b/vendor/github.com/hashicorp/go-getter/LICENSE
new file mode 100644
index 00000000..c33dcc7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md
new file mode 100644
index 00000000..4a0b6a62
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/README.md
@@ -0,0 +1,253 @@
+# go-getter
+
+[![Build Status](http://img.shields.io/travis/hashicorp/go-getter.svg?style=flat-square)][travis]
+[![Build status](https://ci.appveyor.com/api/projects/status/ulq3qr43n62croyq/branch/master?svg=true)][appveyor]
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[travis]: http://travis-ci.org/hashicorp/go-getter
+[godocs]: http://godoc.org/github.com/hashicorp/go-getter
+[appveyor]: https://ci.appveyor.com/project/hashicorp/go-getter/branch/master
+
+go-getter is a library for Go (golang) for downloading files or directories
+from various sources using a URL as the primary form of input.
+
+The power of this library is its flexibility: it can download from a number
+of different sources (file paths, Git, HTTP, Mercurial, etc.) using a single
+string as input. This removes from the implementer the burden of knowing how
+to download from a variety of sources.
+
+The concept of a _detector_ automatically turns invalid URLs into proper
+URLs. For example: "github.com/hashicorp/go-getter" would turn into a
+Git URL. Or "./foo" would turn into a file URL. These are extensible.
+
+This library is used by [Terraform](https://terraform.io) for
+downloading modules, [Otto](https://ottoproject.io) for dependencies and
+Appfile imports, and [Nomad](https://nomadproject.io) for downloading
+binaries.
+
+## Installation and Usage
+
+Package documentation can be found on
+[GoDoc](http://godoc.org/github.com/hashicorp/go-getter).
+
+Installation can be done with a normal `go get`:
+
+```
+$ go get github.com/hashicorp/go-getter
+```
+
+go-getter also has a command you can use to test URL strings:
+
+```
+$ go install github.com/hashicorp/go-getter/cmd/go-getter
+...
+
+$ go-getter github.com/foo/bar ./foo
+...
+```
+
+The command is useful for verifying URL structures.
+
+## URL Format
+
+go-getter uses a single string URL as input to download from a variety of
+protocols. go-getter has various "tricks" with this URL to do certain things.
+This section documents the URL format.
+
+### Supported Protocols and Detectors
+
+**Protocols** are used to download files/directories using a specific
+mechanism. Example protocols are Git and HTTP.
+
+**Detectors** are used to transform a valid or invalid URL into another
+URL if it matches a certain pattern. Example: "github.com/user/repo" is
+automatically transformed into a fully valid Git URL. This allows go-getter
+to be very user friendly.
+
+go-getter supports the following protocols out of the box. Additional protocols
+can be added at runtime by implementing the `Getter` interface.
+
+ * Local files
+ * Git
+ * Mercurial
+ * HTTP
+ * Amazon S3
+
+In addition to the above protocols, go-getter has what are called "detectors."
+These take a URL and attempt to automatically choose the best protocol for
+it, which might involve even changing the protocol. The following detection
+is built-in by default:
+
+ * File paths such as "./foo" are automatically changed to absolute
+ file URLs.
+ * GitHub URLs, such as "github.com/mitchellh/vagrant" are automatically
+ changed to Git protocol over HTTP.
+ * BitBucket URLs, such as "bitbucket.org/mitchellh/vagrant" are automatically
+ changed to a Git or Mercurial protocol using the BitBucket API.
+
+### Forced Protocol
+
+In some cases, the protocol to use is ambiguous depending on the source
+URL. For example, "http://github.com/mitchellh/vagrant.git" could reference
+an HTTP URL or a Git URL. Forced protocol syntax is used to disambiguate this
+URL.
+
+A protocol can be forced by prefixing the URL with the protocol followed
+by a double colon. For example: `git::http://github.com/mitchellh/vagrant.git`
+would download the given HTTP URL using the Git protocol.
+
+Forced protocols will also override any detectors.
+
+In the absence of a forced protocol, detectors may be run on the URL,
+transforming the protocol anyway. The above example would've used the Git
+protocol either way, since the Git detector would've detected it was a GitHub URL.
+
+### Protocol-Specific Options
+
+Each protocol can support protocol-specific options to configure that
+protocol. For example, the `git` protocol supports specifying a `ref`
+query parameter that tells it which ref to check out for that Git
+repository.
+
+The options are specified as query parameters on the URL (or URL-like string)
+given to go-getter. Using the Git example above, the URL below is a valid
+input to go-getter:
+
+ github.com/hashicorp/go-getter?ref=abcd1234
+
+The protocol-specific options are documented below, after the URL format
+section. Because they are part of the URL, we point them out here so
+you know they exist.
+
+### Checksumming
+
+For file downloads of any protocol, go-getter can automatically verify
+a checksum for you. Note that checksumming only works when downloading files,
+not directories, but it works for any protocol.
+
+To checksum a file, append a `checksum` query parameter to the URL.
+The parameter value should be in the format `type:value`, where
+type is "md5", "sha1", "sha256", or "sha512". The "value" should be
+the actual checksum value. go-getter will parse out this query parameter
+automatically and use it to verify the checksum. An example URL
+is shown below:
+
+```
+./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21
+```
+
+The checksum query parameter is never sent to the backend protocol
+implementation. It is used at a higher level by go-getter itself.
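+
+As a sketch from Go, the same verification applied to a hypothetical HTTP
+download (the URL is a placeholder; the checksum is the example value above):
+
+```
+package main
+
+import (
+	"log"
+
+	getter "github.com/hashicorp/go-getter"
+)
+
+func main() {
+	// go-getter strips the checksum parameter before the HTTP getter
+	// runs, then verifies the downloaded file against it.
+	src := "https://example.com/foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21"
+	if err := getter.GetFile("./foo.txt", src); err != nil {
+		log.Fatal(err) // a checksum mismatch is reported as an error
+	}
+}
+```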
+
+### Unarchiving
+
+go-getter will automatically unarchive downloaded files into a file or
+directory based on the extension of the file being requested (over any
+protocol). This works for both file and directory downloads.
+
+go-getter looks for an `archive` query parameter to specify the format of
+the archive. If this isn't specified, go-getter will use the extension of
+the path to see if it appears to be archived. Unarchiving can be explicitly
+disabled by setting the `archive` query parameter to `false`.
+
+The following archive formats are supported:
+
+ * `tar.gz` and `tgz`
+ * `tar.bz2` and `tbz2`
+ * `zip`
+ * `gz`
+ * `bz2`
+
+For example, consider the URL below:
+
+```
+./foo.zip
+```
+
+This will automatically be inferred to be a ZIP file and will be extracted.
+You can also be explicit about the archive type:
+
+```
+./some/other/path?archive=zip
+```
+
+And finally, you can disable archiving completely:
+
+```
+./some/path?archive=false
+```
+
+You can combine unarchiving with the other features of go-getter such
+as checksumming. The special `archive` query parameter will be removed
+from the URL before going to the final protocol downloader.
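+
+Put together, a minimal sketch from Go of an archive download that is both
+checksum-verified and unarchived, using the `Client` type directly (the URL
+and checksum are placeholders):
+
+```
+package main
+
+import (
+	"log"
+
+	getter "github.com/hashicorp/go-getter"
+)
+
+func main() {
+	client := &getter.Client{
+		// Both magic query parameters are stripped before the
+		// underlying protocol downloader runs.
+		Src:  "https://example.com/foo.zip?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21",
+		Dst:  "./foo",
+		Mode: getter.ClientModeAny,
+	}
+	if err := client.Get(); err != nil {
+		log.Fatal(err)
+	}
+}
+```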
+
+## Protocol-Specific Options
+
+This section documents the protocol-specific options that can be specified
+for go-getter. These options should be appended to the input as normal query
+parameters. Depending on the usage of go-getter, applications may provide
+alternate ways of inputting options. For example, [Nomad](https://www.nomadproject.io)
+provides a nice options block for specifying options rather than in the URL.
+
+### General (All Protocols)
+
+The options below are available to all protocols:
+
+ * `archive` - The archive format to use to unarchive this file, or `false`
+   to disable unarchiving. For more details, see the complete section
+   on archive support above.
+
+ * `checksum` - Checksum to verify the downloaded file or archive. See
+ the entire section on checksumming above for format and more details.
+
+### Local Files (`file`)
+
+None
+
+### Git (`git`)
+
+ * `ref` - The Git ref to check out. This is a ref, so it can point to
+   a commit SHA, a branch name, etc. If it is a named ref such as a branch
+   name, go-getter will update it to the latest on each get.
+
+ * `sshkey` - An SSH private key to use during clones. The provided key must
+   be a base64-encoded string. For example, to generate a suitable `sshkey`
+   from a private key file on disk, you would run `base64 -w0 <file>`. A full
+   example source string is shown after this list.
+
+ **Note**: Git 2.3+ is required to use this feature.
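+
+   A full example source string using this parameter, for a hypothetical
+   repository (the key value is a placeholder for your own base64-encoded key):
+
+       git::ssh://git@github.com/org/repo.git?sshkey=<base64-encoded-key>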
+
+### Mercurial (`hg`)
+
+ * `rev` - The Mercurial revision to check out.
+
+### HTTP (`http`)
+
+None
+
+### S3 (`s3`)
+
+S3 takes various access configurations in the URL. Note that it will also
+read these from standard AWS environment variables if they're set. When
+the query parameters are present, they take priority.
+
+ * `aws_access_key_id` - AWS access key.
+ * `aws_access_key_secret` - AWS access key secret.
+ * `aws_access_token` - AWS access token if this is being used.
+
+#### Using IAM Instance Profiles with S3
+
+If you use go-getter and want to use an EC2 IAM Instance Profile to avoid
+using credentials, then just omit these parameters. The instance profile,
+if available, will be used automatically.
+
+#### S3 Bucket Examples
+
+S3 has several addressing schemes used to reference your bucket. These are
+listed here: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
+
+Some examples for these addressing schemes:
+- s3::https://s3.amazonaws.com/bucket/foo
+- s3::https://s3-eu-west-1.amazonaws.com/bucket/foo
+- bucket.s3.amazonaws.com/foo
+- bucket.s3-eu-west-1.amazonaws.com/foo/bar
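+
+As a final sketch from Go, fetching from one of these addresses with the
+forced `s3` protocol (the bucket and path are placeholders):
+
+```
+package main
+
+import (
+	"log"
+
+	getter "github.com/hashicorp/go-getter"
+)
+
+func main() {
+	// Credentials come from the query parameters above, standard AWS
+	// environment variables, or an EC2 instance profile.
+	if err := getter.GetAny("./foo", "s3::https://s3.amazonaws.com/bucket/foo"); err != nil {
+		log.Fatal(err)
+	}
+}
+```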
+
diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go
new file mode 100644
index 00000000..876812a0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/client.go
@@ -0,0 +1,335 @@
+package getter
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ urlhelper "github.com/hashicorp/go-getter/helper/url"
+)
+
+// Client is a client for downloading things.
+//
+// Top-level functions such as Get are shortcuts for interacting with a client.
+// Using a client directly allows more fine-grained control over how downloading
+// is done, as well as customizing the protocols supported.
+type Client struct {
+	// Src is the source URL to get.
+	Src string
+
+	// Dst is the path to save the downloaded thing as. If Dir is set to
+	// true, then this should be a directory. If the directory doesn't exist,
+	// it will be created for you.
+	Dst string
+
+	// Pwd is the working directory for detection. If this isn't set, some
+	// detection may fail. Client will not default pwd to the current
+	// working directory for security reasons.
+	Pwd string
+
+ // Mode is the method of download the client will use. See ClientMode
+ // for documentation.
+ Mode ClientMode
+
+ // Detectors is the list of detectors that are tried on the source.
+ // If this is nil, then the default Detectors will be used.
+ Detectors []Detector
+
+ // Decompressors is the map of decompressors supported by this client.
+ // If this is nil, then the default value is the Decompressors global.
+ Decompressors map[string]Decompressor
+
+ // Getters is the map of protocols supported by this client. If this
+ // is nil, then the default Getters variable will be used.
+ Getters map[string]Getter
+
+ // Dir, if true, tells the Client it is downloading a directory (versus
+ // a single file). This distinction is necessary since filenames and
+ // directory names follow the same format so disambiguating is impossible
+ // without knowing ahead of time.
+ //
+ // WARNING: deprecated. If Mode is set, that will take precedence.
+ Dir bool
+}
+
+// Get downloads the configured source to the destination.
+func (c *Client) Get() error {
+ // Store this locally since there are cases we swap this
+ mode := c.Mode
+ if mode == ClientModeInvalid {
+ if c.Dir {
+ mode = ClientModeDir
+ } else {
+ mode = ClientModeFile
+ }
+ }
+
+ // Default decompressor value
+ decompressors := c.Decompressors
+ if decompressors == nil {
+ decompressors = Decompressors
+ }
+
+ // Detect the URL. This is safe if it is already detected.
+ detectors := c.Detectors
+ if detectors == nil {
+ detectors = Detectors
+ }
+ src, err := Detect(c.Src, c.Pwd, detectors)
+ if err != nil {
+ return err
+ }
+
+ // Determine if we have a forced protocol, i.e. "git::http://..."
+ force, src := getForcedGetter(src)
+
+ // If there is a subdir component, then we download the root separately
+ // and then copy over the proper subdir.
+ var realDst string
+ dst := c.Dst
+ src, subDir := SourceDirSubdir(src)
+ if subDir != "" {
+ tmpDir, err := ioutil.TempDir("", "tf")
+ if err != nil {
+ return err
+ }
+ if err := os.RemoveAll(tmpDir); err != nil {
+ return err
+ }
+ defer os.RemoveAll(tmpDir)
+
+ realDst = dst
+ dst = tmpDir
+ }
+
+ u, err := urlhelper.Parse(src)
+ if err != nil {
+ return err
+ }
+ if force == "" {
+ force = u.Scheme
+ }
+
+ getters := c.Getters
+ if getters == nil {
+ getters = Getters
+ }
+
+ g, ok := getters[force]
+ if !ok {
+ return fmt.Errorf(
+ "download not supported for scheme '%s'", force)
+ }
+
+ // We have magic query parameters that we use to signal different features
+ q := u.Query()
+
+ // Determine if we have an archive type
+ archiveV := q.Get("archive")
+ if archiveV != "" {
+		// Delete the parameter since it is a magic parameter we don't
+		// want to pass on to the Getter
+ q.Del("archive")
+ u.RawQuery = q.Encode()
+
+ // If we can parse the value as a bool and it is false, then
+ // set the archive to "-" which should never map to a decompressor
+ if b, err := strconv.ParseBool(archiveV); err == nil && !b {
+ archiveV = "-"
+ }
+ }
+ if archiveV == "" {
+ // We don't appear to... but is it part of the filename?
+ matchingLen := 0
+		for k := range decompressors {
+ if strings.HasSuffix(u.Path, "."+k) && len(k) > matchingLen {
+ archiveV = k
+ matchingLen = len(k)
+ }
+ }
+ }
+
+ // If we have a decompressor, then we need to change the destination
+ // to download to a temporary path. We unarchive this into the final,
+ // real path.
+ var decompressDst string
+ var decompressDir bool
+ decompressor := decompressors[archiveV]
+ if decompressor != nil {
+ // Create a temporary directory to store our archive. We delete
+ // this at the end of everything.
+ td, err := ioutil.TempDir("", "getter")
+ if err != nil {
+ return fmt.Errorf(
+ "Error creating temporary directory for archive: %s", err)
+ }
+ defer os.RemoveAll(td)
+
+ // Swap the download directory to be our temporary path and
+ // store the old values.
+ decompressDst = dst
+ decompressDir = mode != ClientModeFile
+ dst = filepath.Join(td, "archive")
+ mode = ClientModeFile
+ }
+
+ // Determine if we have a checksum
+ var checksumHash hash.Hash
+ var checksumValue []byte
+ if v := q.Get("checksum"); v != "" {
+ // Delete the query parameter if we have it.
+ q.Del("checksum")
+ u.RawQuery = q.Encode()
+
+ // Determine the checksum hash type
+ checksumType := ""
+ idx := strings.Index(v, ":")
+ if idx > -1 {
+ checksumType = v[:idx]
+ }
+ switch checksumType {
+ case "md5":
+ checksumHash = md5.New()
+ case "sha1":
+ checksumHash = sha1.New()
+ case "sha256":
+ checksumHash = sha256.New()
+ case "sha512":
+ checksumHash = sha512.New()
+ default:
+ return fmt.Errorf(
+ "unsupported checksum type: %s", checksumType)
+ }
+
+ // Get the remainder of the value and parse it into bytes
+ b, err := hex.DecodeString(v[idx+1:])
+ if err != nil {
+ return fmt.Errorf("invalid checksum: %s", err)
+ }
+
+ // Set our value
+ checksumValue = b
+ }
+
+ if mode == ClientModeAny {
+ // Ask the getter which client mode to use
+ mode, err = g.ClientMode(u)
+ if err != nil {
+ return err
+ }
+
+ // Destination is the base name of the URL path in "any" mode when
+ // a file source is detected.
+ if mode == ClientModeFile {
+ dst = filepath.Join(dst, filepath.Base(u.Path))
+ }
+ }
+
+ // If we're not downloading a directory, then just download the file
+ // and return.
+ if mode == ClientModeFile {
+ err := g.GetFile(dst, u)
+ if err != nil {
+ return err
+ }
+
+ if checksumHash != nil {
+ if err := checksum(dst, checksumHash, checksumValue); err != nil {
+ return err
+ }
+ }
+
+ if decompressor != nil {
+ // We have a decompressor, so decompress the current destination
+ // into the final destination with the proper mode.
+ err := decompressor.Decompress(decompressDst, dst, decompressDir)
+ if err != nil {
+ return err
+ }
+
+ // Swap the information back
+ dst = decompressDst
+ if decompressDir {
+ mode = ClientModeAny
+ } else {
+ mode = ClientModeFile
+ }
+ }
+
+ // We check the dir value again because it can be switched back
+ // if we were unarchiving. If we're still only Get-ing a file, then
+ // we're done.
+ if mode == ClientModeFile {
+ return nil
+ }
+ }
+
+	// If we're at this point we're either downloading a directory or we've
+	// downloaded and unarchived a directory and we're just handling the subdir.
+	// In the case where we have a decompressor, we don't Get because the Get
+	// already happened above.
+ if decompressor == nil {
+ // If we're getting a directory, then this is an error. You cannot
+ // checksum a directory. TODO: test
+ if checksumHash != nil {
+ return fmt.Errorf(
+ "checksum cannot be specified for directory download")
+ }
+
+ // We're downloading a directory, which might require a bit more work
+ // if we're specifying a subdir.
+ err := g.Get(dst, u)
+ if err != nil {
+ err = fmt.Errorf("error downloading '%s': %s", src, err)
+ return err
+ }
+ }
+
+ // If we have a subdir, copy that over
+ if subDir != "" {
+ if err := os.RemoveAll(realDst); err != nil {
+ return err
+ }
+ if err := os.MkdirAll(realDst, 0755); err != nil {
+ return err
+ }
+
+ return copyDir(realDst, filepath.Join(dst, subDir), false)
+ }
+
+ return nil
+}
+
+// checksum is a simple helper to compute the checksum of a source file
+// and compare it to the given expected value.
+func checksum(source string, h hash.Hash, v []byte) error {
+ f, err := os.Open(source)
+ if err != nil {
+ return fmt.Errorf("Failed to open file for checksum: %s", err)
+ }
+ defer f.Close()
+
+ if _, err := io.Copy(h, f); err != nil {
+ return fmt.Errorf("Failed to hash: %s", err)
+ }
+
+ if actual := h.Sum(nil); !bytes.Equal(actual, v) {
+ return fmt.Errorf(
+ "Checksums did not match.\nExpected: %s\nGot: %s",
+ hex.EncodeToString(v),
+ hex.EncodeToString(actual))
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/client_mode.go b/vendor/github.com/hashicorp/go-getter/client_mode.go
new file mode 100644
index 00000000..7f02509a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/client_mode.go
@@ -0,0 +1,24 @@
+package getter
+
+// ClientMode is the mode that the client operates in.
+type ClientMode uint
+
+const (
+ ClientModeInvalid ClientMode = iota
+
+ // ClientModeAny downloads anything it can. In this mode, dst must
+ // be a directory. If src is a file, it is saved into the directory
+ // with the basename of the URL. If src is a directory or archive,
+ // it is unpacked directly into dst.
+ ClientModeAny
+
+ // ClientModeFile downloads a single file. In this mode, dst must
+ // be a file path (doesn't have to exist). src must point to a single
+ // file. It is saved as dst.
+ ClientModeFile
+
+ // ClientModeDir downloads a directory. In this mode, dst must be
+ // a directory path (doesn't have to exist). src must point to an
+ // archive or directory (such as in s3).
+ ClientModeDir
+)
diff --git a/vendor/github.com/hashicorp/go-getter/copy_dir.go b/vendor/github.com/hashicorp/go-getter/copy_dir.go
new file mode 100644
index 00000000..2f58e8ae
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/copy_dir.go
@@ -0,0 +1,78 @@
+package getter
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// copyDir copies the src directory contents into dst. Both directories
+// should already exist.
+//
+// If ignoreDot is set to true, then dot-prefixed files/folders are ignored.
+func copyDir(dst string, src string, ignoreDot bool) error {
+ src, err := filepath.EvalSymlinks(src)
+ if err != nil {
+ return err
+ }
+
+ walkFn := func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if path == src {
+ return nil
+ }
+
+ if ignoreDot && strings.HasPrefix(filepath.Base(path), ".") {
+ // Skip any dot files
+ if info.IsDir() {
+ return filepath.SkipDir
+ } else {
+ return nil
+ }
+ }
+
+ // The "path" has the src prefixed to it. We need to join our
+ // destination with the path without the src on it.
+ dstPath := filepath.Join(dst, path[len(src):])
+
+ // If we have a directory, make that subdirectory, then continue
+ // the walk.
+ if info.IsDir() {
+ if path == filepath.Join(src, dst) {
+ // dst is in src; don't walk it.
+ return nil
+ }
+
+ if err := os.MkdirAll(dstPath, 0755); err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ // If we have a file, copy the contents.
+ srcF, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ dstF, err := os.Create(dstPath)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ if _, err := io.Copy(dstF, srcF); err != nil {
+ return err
+ }
+
+ // Chmod it
+ return os.Chmod(dstPath, info.Mode())
+ }
+
+ return filepath.Walk(src, walkFn)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go
new file mode 100644
index 00000000..d18174cc
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress.go
@@ -0,0 +1,29 @@
+package getter
+
+// Decompressor defines the interface that must be implemented to add
+// support for decompressing a type.
+type Decompressor interface {
+ // Decompress should decompress src to dst. dir specifies whether dst
+ // is a directory or single file. src is guaranteed to be a single file
+ // that exists. dst is not guaranteed to exist already.
+ Decompress(dst, src string, dir bool) error
+}
+
+// Decompressors is the mapping of extension to the Decompressor implementation
+// that will decompress that extension/type.
+var Decompressors map[string]Decompressor
+
+func init() {
+ tbzDecompressor := new(TarBzip2Decompressor)
+ tgzDecompressor := new(TarGzipDecompressor)
+
+ Decompressors = map[string]Decompressor{
+ "bz2": new(Bzip2Decompressor),
+ "gz": new(GzipDecompressor),
+ "tar.bz2": tbzDecompressor,
+ "tar.gz": tgzDecompressor,
+ "tbz2": tbzDecompressor,
+ "tgz": tgzDecompressor,
+ "zip": new(ZipDecompressor),
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go
new file mode 100644
index 00000000..339f4cf7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go
@@ -0,0 +1,45 @@
+package getter
+
+import (
+ "compress/bzip2"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// Bzip2Decompressor is an implementation of Decompressor that can
+// decompress bz2 files.
+type Bzip2Decompressor struct{}
+
+func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool) error {
+ // Directory isn't supported at all
+ if dir {
+ return fmt.Errorf("bzip2-compressed files can only unarchive to a single file")
+ }
+
+ // If we're going into a directory we should make that first
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ // File first
+ f, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Bzip2 compression is second
+ bzipR := bzip2.NewReader(f)
+
+ // Copy it out
+ dstF, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ _, err = io.Copy(dstF, bzipR)
+ return err
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
new file mode 100644
index 00000000..20010540
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
@@ -0,0 +1,49 @@
+package getter
+
+import (
+ "compress/gzip"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// GzipDecompressor is an implementation of Decompressor that can
+// decompress gzip files.
+type GzipDecompressor struct{}
+
+func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error {
+ // Directory isn't supported at all
+ if dir {
+ return fmt.Errorf("gzip-compressed files can only unarchive to a single file")
+ }
+
+ // If we're going into a directory we should make that first
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ // File first
+ f, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // gzip compression is second
+ gzipR, err := gzip.NewReader(f)
+ if err != nil {
+ return err
+ }
+ defer gzipR.Close()
+
+ // Copy it out
+ dstF, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ _, err = io.Copy(dstF, gzipR)
+ return err
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tar.go b/vendor/github.com/hashicorp/go-getter/decompress_tar.go
new file mode 100644
index 00000000..61f60431
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tar.go
@@ -0,0 +1,83 @@
+package getter
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// untar is a shared helper for untarring an archive. The reader should provide
+// an uncompressed view of the tar archive.
+func untar(input io.Reader, dst, src string, dir bool) error {
+ tarR := tar.NewReader(input)
+ done := false
+ for {
+ hdr, err := tarR.Next()
+ if err == io.EOF {
+ if !done {
+ // Empty archive
+ return fmt.Errorf("empty archive: %s", src)
+ }
+
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ path := dst
+ if dir {
+ path = filepath.Join(path, hdr.Name)
+ }
+
+ if hdr.FileInfo().IsDir() {
+ if !dir {
+ return fmt.Errorf("expected a single file: %s", src)
+ }
+
+ // A directory, just make the directory and continue unarchiving...
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+
+ continue
+ } else {
+ // There is no ordering guarantee that a file in a directory is
+ // listed before the directory
+ dstPath := filepath.Dir(path)
+
+ // Check that the directory exists, otherwise create it
+ if _, err := os.Stat(dstPath); os.IsNotExist(err) {
+ if err := os.MkdirAll(dstPath, 0755); err != nil {
+ return err
+ }
+ }
+ }
+
+		// We have a file. If we already extracted one in single-file
+		// mode, then it is an error
+		if !dir && done {
+			return fmt.Errorf("expected a single file, got multiple: %s", src)
+		}
+
+		// Mark that we're done so any future file in single-file mode errors
+		done = true
+
+ // Open the file for writing
+ dstF, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(dstF, tarR)
+ dstF.Close()
+ if err != nil {
+ return err
+ }
+
+ // Chmod the file
+ if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil {
+ return err
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go
new file mode 100644
index 00000000..5391b5c8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go
@@ -0,0 +1,33 @@
+package getter
+
+import (
+ "compress/bzip2"
+ "os"
+ "path/filepath"
+)
+
+// TarBzip2Decompressor is an implementation of Decompressor that can
+// decompress tar.bz2 files.
+type TarBzip2Decompressor struct{}
+
+func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error {
+ // If we're going into a directory we should make that first
+ mkdir := dst
+ if !dir {
+ mkdir = filepath.Dir(dst)
+ }
+ if err := os.MkdirAll(mkdir, 0755); err != nil {
+ return err
+ }
+
+ // File first
+ f, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Bzip2 compression is second
+ bzipR := bzip2.NewReader(f)
+ return untar(bzipR, dst, src, dir)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go
new file mode 100644
index 00000000..82b8ab4f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_testing.go
@@ -0,0 +1,135 @@
+package getter
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+ "strings"
+
+ "github.com/mitchellh/go-testing-interface"
+)
+
+// TestDecompressCase is a single test case for testing decompressors
+type TestDecompressCase struct {
+ Input string // Input is the complete path to the input file
+ Dir bool // Dir is whether or not we're testing directory mode
+ Err bool // Err is whether we expect an error or not
+ DirList []string // DirList is the list of files for Dir mode
+ FileMD5 string // FileMD5 is the expected MD5 for a single file
+}
+
+// TestDecompressor is a helper function for testing generic decompressors.
+func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) {
+ for _, tc := range cases {
+ t.Logf("Testing: %s", tc.Input)
+
+ // Temporary dir to store stuff
+ td, err := ioutil.TempDir("", "getter")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+		// The destination is always joined with "subdir/result" so that
+		// we exercise a path that doesn't exist yet
+ dst := filepath.Join(td, "subdir", "result")
+
+ // We use a function so defers work
+ func() {
+ defer os.RemoveAll(td)
+
+ // Decompress
+ err := d.Decompress(dst, tc.Input, tc.Dir)
+ if (err != nil) != tc.Err {
+ t.Fatalf("err %s: %s", tc.Input, err)
+ }
+ if tc.Err {
+ return
+ }
+
+ // If it isn't a directory, then check for a single file
+ if !tc.Dir {
+ fi, err := os.Stat(dst)
+ if err != nil {
+ t.Fatalf("err %s: %s", tc.Input, err)
+ }
+ if fi.IsDir() {
+ t.Fatalf("err %s: expected file, got directory", tc.Input)
+ }
+ if tc.FileMD5 != "" {
+ actual := testMD5(t, dst)
+ expected := tc.FileMD5
+ if actual != expected {
+ t.Fatalf("err %s: expected MD5 %s, got %s", tc.Input, expected, actual)
+ }
+ }
+
+ return
+ }
+
+ // Convert expected for windows
+ expected := tc.DirList
+ if runtime.GOOS == "windows" {
+ for i, v := range expected {
+ expected[i] = strings.Replace(v, "/", "\\", -1)
+ }
+ }
+
+ // Directory, check for the correct contents
+ actual := testListDir(t, dst)
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad %s\n\n%#v\n\n%#v", tc.Input, actual, expected)
+ }
+ }()
+ }
+}
+
+func testListDir(t testing.T, path string) []string {
+ var result []string
+ err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ sub = strings.TrimPrefix(sub, path)
+ if sub == "" {
+ return nil
+ }
+ sub = sub[1:] // Trim the leading path sep.
+
+ // If it is a dir, add trailing sep
+ if info.IsDir() {
+ sub += string(os.PathSeparator)
+ }
+
+ result = append(result, sub)
+ return nil
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ sort.Strings(result)
+ return result
+}
+
+func testMD5(t testing.T, path string) string {
+ f, err := os.Open(path)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ defer f.Close()
+
+ h := md5.New()
+ _, err = io.Copy(h, f)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ result := h.Sum(nil)
+ return hex.EncodeToString(result)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go
new file mode 100644
index 00000000..65eb70dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go
@@ -0,0 +1,39 @@
+package getter
+
+import (
+ "compress/gzip"
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
+// TarGzipDecompressor is an implementation of Decompressor that can
+// decompress tar.gz (gzip-compressed tar) files.
+type TarGzipDecompressor struct{}
+
+func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error {
+ // If we're going into a directory we should make that first
+ mkdir := dst
+ if !dir {
+ mkdir = filepath.Dir(dst)
+ }
+ if err := os.MkdirAll(mkdir, 0755); err != nil {
+ return err
+ }
+
+ // File first
+ f, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Gzip compression is second
+ gzipR, err := gzip.NewReader(f)
+ if err != nil {
+ return fmt.Errorf("Error opening a gzip reader for %s: %s", src, err)
+ }
+ defer gzipR.Close()
+
+ return untar(gzipR, dst, src, dir)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
new file mode 100644
index 00000000..a065c076
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
@@ -0,0 +1,96 @@
+package getter
+
+import (
+ "archive/zip"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// ZipDecompressor is an implementation of Decompressor that can
+// decompress zip files.
+type ZipDecompressor struct{}
+
+func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error {
+ // If we're going into a directory we should make that first
+ mkdir := dst
+ if !dir {
+ mkdir = filepath.Dir(dst)
+ }
+ if err := os.MkdirAll(mkdir, 0755); err != nil {
+ return err
+ }
+
+ // Open the zip
+ zipR, err := zip.OpenReader(src)
+ if err != nil {
+ return err
+ }
+ defer zipR.Close()
+
+ // Check the zip integrity
+ if len(zipR.File) == 0 {
+ // Empty archive
+ return fmt.Errorf("empty archive: %s", src)
+ }
+ if !dir && len(zipR.File) > 1 {
+ return fmt.Errorf("expected a single file: %s", src)
+ }
+
+ // Go through and unarchive
+ for _, f := range zipR.File {
+ path := dst
+ if dir {
+ path = filepath.Join(path, f.Name)
+ }
+
+ if f.FileInfo().IsDir() {
+ if !dir {
+ return fmt.Errorf("expected a single file: %s", src)
+ }
+
+ // A directory, just make the directory and continue unarchiving...
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+
+ continue
+ }
+
+ // Create the enclosing directories if we must. ZIP files aren't
+ // required to contain entries for just the directories so this
+ // can happen.
+ if dir {
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ }
+
+ // Open the file for reading
+ srcF, err := f.Open()
+ if err != nil {
+ return err
+ }
+
+ // Open the file for writing
+ dstF, err := os.Create(path)
+ if err != nil {
+ srcF.Close()
+ return err
+ }
+ _, err = io.Copy(dstF, srcF)
+ srcF.Close()
+ dstF.Close()
+ if err != nil {
+ return err
+ }
+
+ // Chmod the file
+ if err := os.Chmod(path, f.Mode()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go
new file mode 100644
index 00000000..481b737c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect.go
@@ -0,0 +1,97 @@
+package getter
+
+import (
+ "fmt"
+ "path/filepath"
+
+ "github.com/hashicorp/go-getter/helper/url"
+)
+
+// Detector defines the interface that an invalid URL or a URL with a blank
+// scheme is passed through in order to determine if it's shorthand for
+// something else well-known.
+type Detector interface {
+ // Detect will detect whether the string matches a known pattern to
+ // turn it into a proper URL.
+ Detect(string, string) (string, bool, error)
+}
+
+// Detectors is the list of detectors that are tried on an invalid URL.
+// This is also the order they're tried (index 0 is first).
+var Detectors []Detector
+
+func init() {
+ Detectors = []Detector{
+ new(GitHubDetector),
+ new(BitBucketDetector),
+ new(S3Detector),
+ new(FileDetector),
+ }
+}
+
+// Detect turns a source string into another source string if it is
+// detected to be of a known pattern.
+//
+// The third parameter should be the list of detectors to use in the
+// order to try them. If you don't want to configure this, just use
+// the global Detectors variable.
+//
+// This is safe to be called with an already valid source string: Detect
+// will just return it.
+func Detect(src string, pwd string, ds []Detector) (string, error) {
+ getForce, getSrc := getForcedGetter(src)
+
+ // Separate out the subdir if there is one, we don't pass that to detect
+ getSrc, subDir := SourceDirSubdir(getSrc)
+
+ u, err := url.Parse(getSrc)
+ if err == nil && u.Scheme != "" {
+ // Valid URL
+ return src, nil
+ }
+
+ for _, d := range ds {
+ result, ok, err := d.Detect(getSrc, pwd)
+ if err != nil {
+ return "", err
+ }
+ if !ok {
+ continue
+ }
+
+ var detectForce string
+ detectForce, result = getForcedGetter(result)
+ result, detectSubdir := SourceDirSubdir(result)
+
+ // If we have a subdir from the detection, then prepend it to our
+ // requested subdir.
+ if detectSubdir != "" {
+ if subDir != "" {
+ subDir = filepath.Join(detectSubdir, subDir)
+ } else {
+ subDir = detectSubdir
+ }
+ }
+ if subDir != "" {
+ u, err := url.Parse(result)
+ if err != nil {
+ return "", fmt.Errorf("Error parsing URL: %s", err)
+ }
+ u.Path += "//" + subDir
+ result = u.String()
+ }
+
+ // Preserve the forced getter if it exists. We try to use the
+ // original set force first, followed by any force set by the
+ // detector.
+ if getForce != "" {
+ result = fmt.Sprintf("%s::%s", getForce, result)
+ } else if detectForce != "" {
+ result = fmt.Sprintf("%s::%s", detectForce, result)
+ }
+
+ return result, nil
+ }
+
+ return "", fmt.Errorf("invalid source string: %s", src)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
new file mode 100644
index 00000000..a183a17d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
@@ -0,0 +1,66 @@
+package getter
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+// BitBucketDetector implements Detector to detect BitBucket URLs and turn
+// them into URLs that the Git or Hg Getter can understand.
+type BitBucketDetector struct{}
+
+func (d *BitBucketDetector) Detect(src, _ string) (string, bool, error) {
+ if len(src) == 0 {
+ return "", false, nil
+ }
+
+ if strings.HasPrefix(src, "bitbucket.org/") {
+ return d.detectHTTP(src)
+ }
+
+ return "", false, nil
+}
+
+func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) {
+ u, err := url.Parse("https://" + src)
+ if err != nil {
+ return "", true, fmt.Errorf("error parsing BitBucket URL: %s", err)
+ }
+
+ // We need to get info on this BitBucket repository to determine whether
+ // it is Git or Hg.
+ var info struct {
+ SCM string `json:"scm"`
+ }
+	infoURL := "https://api.bitbucket.org/1.0/repositories" + u.Path
+	resp, err := http.Get(infoURL)
+	if err != nil {
+		return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
+	}
+	// Close the response body so the connection can be reused.
+	defer resp.Body.Close()
+ if resp.StatusCode == 403 {
+ // A private repo
+ return "", true, fmt.Errorf(
+ "shorthand BitBucket URL can't be used for private repos, " +
+ "please use a full URL")
+ }
+ dec := json.NewDecoder(resp.Body)
+ if err := dec.Decode(&info); err != nil {
+ return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
+ }
+
+ switch info.SCM {
+ case "git":
+ if !strings.HasSuffix(u.Path, ".git") {
+ u.Path += ".git"
+ }
+
+ return "git::" + u.String(), true, nil
+ case "hg":
+ return "hg::" + u.String(), true, nil
+ default:
+ return "", true, fmt.Errorf("unknown BitBucket SCM type: %s", info.SCM)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_file.go b/vendor/github.com/hashicorp/go-getter/detect_file.go
new file mode 100644
index 00000000..756ea43f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_file.go
@@ -0,0 +1,67 @@
+package getter
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+// FileDetector implements Detector to detect file paths.
+type FileDetector struct{}
+
+func (d *FileDetector) Detect(src, pwd string) (string, bool, error) {
+ if len(src) == 0 {
+ return "", false, nil
+ }
+
+ if !filepath.IsAbs(src) {
+ if pwd == "" {
+ return "", true, fmt.Errorf(
+ "relative paths require a module with a pwd")
+ }
+
+		// Stat the pwd to determine if it's a symbolic link. If it is,
+		// then the pwd becomes the original directory. Otherwise,
+		// `filepath.Join` below does some weird stuff.
+ //
+ // We just ignore if the pwd doesn't exist. That error will be
+ // caught later when we try to use the URL.
+ if fi, err := os.Lstat(pwd); !os.IsNotExist(err) {
+ if err != nil {
+ return "", true, err
+ }
+ if fi.Mode()&os.ModeSymlink != 0 {
+ pwd, err = os.Readlink(pwd)
+ if err != nil {
+ return "", true, err
+ }
+
+ // The symlink itself might be a relative path, so we have to
+ // resolve this to have a correctly rooted URL.
+ pwd, err = filepath.Abs(pwd)
+ if err != nil {
+ return "", true, err
+ }
+ }
+ }
+
+ src = filepath.Join(pwd, src)
+ }
+
+ return fmtFileURL(src), true, nil
+}
+
+func fmtFileURL(path string) string {
+ if runtime.GOOS == "windows" {
+ // Make sure we're using "/" on Windows. URLs are "/"-based.
+ path = filepath.ToSlash(path)
+ return fmt.Sprintf("file://%s", path)
+ }
+
+ // Make sure that we don't start with "/" since we add that below.
+ if path[0] == '/' {
+ path = path[1:]
+ }
+ return fmt.Sprintf("file:///%s", path)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_github.go b/vendor/github.com/hashicorp/go-getter/detect_github.go
new file mode 100644
index 00000000..c084ad9a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_github.go
@@ -0,0 +1,73 @@
+package getter
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+// GitHubDetector implements Detector to detect GitHub URLs and turn
+// them into URLs that the Git Getter can understand.
+type GitHubDetector struct{}
+
+func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) {
+ if len(src) == 0 {
+ return "", false, nil
+ }
+
+ if strings.HasPrefix(src, "github.com/") {
+ return d.detectHTTP(src)
+ } else if strings.HasPrefix(src, "git@github.com:") {
+ return d.detectSSH(src)
+ }
+
+ return "", false, nil
+}
+
+func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) {
+ parts := strings.Split(src, "/")
+ if len(parts) < 3 {
+ return "", false, fmt.Errorf(
+ "GitHub URLs should be github.com/username/repo")
+ }
+
+ urlStr := fmt.Sprintf("https://%s", strings.Join(parts[:3], "/"))
+ url, err := url.Parse(urlStr)
+ if err != nil {
+ return "", true, fmt.Errorf("error parsing GitHub URL: %s", err)
+ }
+
+ if !strings.HasSuffix(url.Path, ".git") {
+ url.Path += ".git"
+ }
+
+ if len(parts) > 3 {
+ url.Path += "//" + strings.Join(parts[3:], "/")
+ }
+
+ return "git::" + url.String(), true, nil
+}
+
+func (d *GitHubDetector) detectSSH(src string) (string, bool, error) {
+ idx := strings.Index(src, ":")
+ qidx := strings.Index(src, "?")
+ if qidx == -1 {
+ qidx = len(src)
+ }
+
+ var u url.URL
+ u.Scheme = "ssh"
+ u.User = url.User("git")
+ u.Host = "github.com"
+ u.Path = src[idx+1 : qidx]
+ if qidx < len(src) {
+ q, err := url.ParseQuery(src[qidx+1:])
+ if err != nil {
+ return "", true, fmt.Errorf("error parsing GitHub SSH URL: %s", err)
+ }
+
+ u.RawQuery = q.Encode()
+ }
+
+ return "git::" + u.String(), true, nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_s3.go b/vendor/github.com/hashicorp/go-getter/detect_s3.go
new file mode 100644
index 00000000..8e0f4a03
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_s3.go
@@ -0,0 +1,61 @@
+package getter
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+// S3Detector implements Detector to detect S3 URLs and turn
+// them into URLs that the S3 getter can understand.
+type S3Detector struct{}
+
+func (d *S3Detector) Detect(src, _ string) (string, bool, error) {
+ if len(src) == 0 {
+ return "", false, nil
+ }
+
+ if strings.Contains(src, ".amazonaws.com/") {
+ return d.detectHTTP(src)
+ }
+
+ return "", false, nil
+}
+
+func (d *S3Detector) detectHTTP(src string) (string, bool, error) {
+ parts := strings.Split(src, "/")
+ if len(parts) < 2 {
+ return "", false, fmt.Errorf(
+ "URL is not a valid S3 URL")
+ }
+
+ hostParts := strings.Split(parts[0], ".")
+ if len(hostParts) == 3 {
+ return d.detectPathStyle(hostParts[0], parts[1:])
+ } else if len(hostParts) == 4 {
+ return d.detectVhostStyle(hostParts[1], hostParts[0], parts[1:])
+ } else {
+ return "", false, fmt.Errorf(
+ "URL is not a valid S3 URL")
+ }
+}
+
+func (d *S3Detector) detectPathStyle(region string, parts []string) (string, bool, error) {
+ urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s", region, strings.Join(parts, "/"))
+ url, err := url.Parse(urlStr)
+ if err != nil {
+ return "", false, fmt.Errorf("error parsing S3 URL: %s", err)
+ }
+
+ return "s3::" + url.String(), true, nil
+}
+
+func (d *S3Detector) detectVhostStyle(region, bucket string, parts []string) (string, bool, error) {
+ urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s/%s", region, bucket, strings.Join(parts, "/"))
+ url, err := url.Parse(urlStr)
+ if err != nil {
+ return "", false, fmt.Errorf("error parsing S3 URL: %s", err)
+ }
+
+ return "s3::" + url.String(), true, nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/folder_storage.go b/vendor/github.com/hashicorp/go-getter/folder_storage.go
new file mode 100644
index 00000000..647ccf45
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/folder_storage.go
@@ -0,0 +1,65 @@
+package getter
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
+// FolderStorage is an implementation of the Storage interface that manages
+// modules on the disk.
+type FolderStorage struct {
+ // StorageDir is the directory where the modules will be stored.
+ StorageDir string
+}
+
+// Dir implements Storage.Dir
+func (s *FolderStorage) Dir(key string) (d string, e bool, err error) {
+ d = s.dir(key)
+ _, err = os.Stat(d)
+ if err == nil {
+ // Directory exists
+ e = true
+ return
+ }
+ if os.IsNotExist(err) {
+ // Directory doesn't exist
+ d = ""
+ e = false
+ err = nil
+ return
+ }
+
+ // An error
+ d = ""
+ e = false
+ return
+}
+
+// Get implements Storage.Get
+func (s *FolderStorage) Get(key string, source string, update bool) error {
+ dir := s.dir(key)
+ if !update {
+ if _, err := os.Stat(dir); err == nil {
+ // If the directory already exists, then we're done since
+ // we're not updating.
+ return nil
+ } else if !os.IsNotExist(err) {
+ // If the error we got wasn't a file-not-exist error, then
+ // something went wrong and we should report it.
+ return fmt.Errorf("Error reading module directory: %s", err)
+ }
+ }
+
+ // Get the source. This always forces an update.
+ return Get(dir, source)
+}
+
+// dir returns the directory name that we'll use internally to map the
+// given key to a path on disk.
+func (s *FolderStorage) dir(key string) string {
+ sum := md5.Sum([]byte(key))
+ return filepath.Join(s.StorageDir, hex.EncodeToString(sum[:]))
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go
new file mode 100644
index 00000000..c3236f55
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get.go
@@ -0,0 +1,139 @@
+// Package getter is a package for downloading files or directories from a
+// variety of protocols.
+//
+// getter is unique in its ability to download both directories and files.
+// It also detects certain source strings to be protocol-specific URLs. For
+// example, "github.com/hashicorp/go-getter" would turn into a Git URL and
+// use the Git protocol.
+//
+// Protocols and detectors are extensible.
+//
+// To get started, see Client.
+package getter
+
+import (
+ "bytes"
+ "fmt"
+ "net/url"
+ "os/exec"
+ "regexp"
+ "syscall"
+)
+
+// Getter defines the interface that schemes must implement to download
+// things.
+type Getter interface {
+ // Get downloads the given URL into the given directory. This always
+ // assumes that we're updating and gets the latest version that it can.
+ //
+ // The directory may already exist (if we're updating). If it is in a
+ // format that isn't understood, an error should be returned. Get shouldn't
+ // simply nuke the directory.
+ Get(string, *url.URL) error
+
+	// GetFile downloads the given URL into the given path. The URL must
+ // reference a single file. If possible, the Getter should check if
+ // the remote end contains the same file and no-op this operation.
+ GetFile(string, *url.URL) error
+
+ // ClientMode returns the mode based on the given URL. This is used to
+ // allow clients to let the getters decide which mode to use.
+ ClientMode(*url.URL) (ClientMode, error)
+}
+
+// Getters is the mapping of scheme to the Getter implementation that will
+// be used to get a dependency.
+var Getters map[string]Getter
+
+// forcedRegexp is the regular expression that finds forced getters. This
+// syntax is scheme::url, example: git::https://foo.com
+var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`)
+
+func init() {
+ httpGetter := &HttpGetter{Netrc: true}
+
+ Getters = map[string]Getter{
+ "file": new(FileGetter),
+ "git": new(GitGetter),
+ "hg": new(HgGetter),
+ "s3": new(S3Getter),
+ "http": httpGetter,
+ "https": httpGetter,
+ }
+}
+
+// Get downloads the directory specified by src into the folder specified by
+// dst. If dst already exists, Get will attempt to update it.
+//
+// src is a URL, whereas dst is always just a file path to a folder. This
+// folder doesn't need to exist. It will be created if it doesn't exist.
+func Get(dst, src string) error {
+ return (&Client{
+ Src: src,
+ Dst: dst,
+ Dir: true,
+ Getters: Getters,
+ }).Get()
+}
+
+// GetAny downloads a URL into the given destination. Unlike Get or
+// GetFile, both directories and files are supported.
+//
+// dst must be a directory. If src is a file, it will be downloaded
+// into dst with the basename of the URL. If src is a directory or
+// archive, it will be unpacked directly into dst.
+func GetAny(dst, src string) error {
+ return (&Client{
+ Src: src,
+ Dst: dst,
+ Mode: ClientModeAny,
+ Getters: Getters,
+ }).Get()
+}
+
+// GetFile downloads the file specified by src into the path specified by
+// dst.
+func GetFile(dst, src string) error {
+ return (&Client{
+ Src: src,
+ Dst: dst,
+ Dir: false,
+ Getters: Getters,
+ }).Get()
+}
+
+// getRunCommand is a helper that will run a command and capture its output
+// in case an error happens.
+func getRunCommand(cmd *exec.Cmd) error {
+ var buf bytes.Buffer
+ cmd.Stdout = &buf
+ cmd.Stderr = &buf
+ err := cmd.Run()
+ if err == nil {
+ return nil
+ }
+ if exiterr, ok := err.(*exec.ExitError); ok {
+ // The program has exited with an exit code != 0
+ if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+ return fmt.Errorf(
+ "%s exited with %d: %s",
+ cmd.Path,
+ status.ExitStatus(),
+ buf.String())
+ }
+ }
+
+ return fmt.Errorf("error running %s: %s", cmd.Path, buf.String())
+}
+
+// getForcedGetter takes a source and returns the tuple of the forced
+// getter and the raw URL (without the force syntax).
+func getForcedGetter(src string) (string, string) {
+ var forced string
+ if ms := forcedRegexp.FindStringSubmatch(src); ms != nil {
+ forced = ms[1]
+ src = ms[2]
+ }
+
+ return forced, src
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_file.go b/vendor/github.com/hashicorp/go-getter/get_file.go
new file mode 100644
index 00000000..e5d2d61d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_file.go
@@ -0,0 +1,32 @@
+package getter
+
+import (
+ "net/url"
+ "os"
+)
+
+// FileGetter is a Getter implementation that will download a module from
+// a file scheme.
+type FileGetter struct {
+ // Copy, if set to true, will copy data instead of using a symlink
+ Copy bool
+}
+
+func (g *FileGetter) ClientMode(u *url.URL) (ClientMode, error) {
+ path := u.Path
+ if u.RawPath != "" {
+ path = u.RawPath
+ }
+
+ fi, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+
+ // Check if the source is a directory.
+ if fi.IsDir() {
+ return ClientModeDir, nil
+ }
+
+ return ClientModeFile, nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_unix.go b/vendor/github.com/hashicorp/go-getter/get_file_unix.go
new file mode 100644
index 00000000..c89a2d5a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_file_unix.go
@@ -0,0 +1,103 @@
+// +build !windows
+
+package getter
+
+import (
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "path/filepath"
+)
+
+func (g *FileGetter) Get(dst string, u *url.URL) error {
+ path := u.Path
+ if u.RawPath != "" {
+ path = u.RawPath
+ }
+
+ // The source path must exist and be a directory to be usable.
+ if fi, err := os.Stat(path); err != nil {
+ return fmt.Errorf("source path error: %s", err)
+ } else if !fi.IsDir() {
+ return fmt.Errorf("source path must be a directory")
+ }
+
+ fi, err := os.Lstat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ // If the destination already exists, it must be a symlink
+ if err == nil {
+ mode := fi.Mode()
+ if mode&os.ModeSymlink == 0 {
+ return fmt.Errorf("destination exists and is not a symlink")
+ }
+
+ // Remove the destination
+ if err := os.Remove(dst); err != nil {
+ return err
+ }
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ return os.Symlink(path, dst)
+}
+
+func (g *FileGetter) GetFile(dst string, u *url.URL) error {
+ path := u.Path
+ if u.RawPath != "" {
+ path = u.RawPath
+ }
+
+ // The source path must exist and be a file to be usable.
+ if fi, err := os.Stat(path); err != nil {
+ return fmt.Errorf("source path error: %s", err)
+ } else if fi.IsDir() {
+ return fmt.Errorf("source path must be a file")
+ }
+
+ _, err := os.Lstat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+	// If the destination already exists, remove it
+ if err == nil {
+ // Remove the destination
+ if err := os.Remove(dst); err != nil {
+ return err
+ }
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ // If we're not copying, just symlink and we're done
+ if !g.Copy {
+ return os.Symlink(path, dst)
+ }
+
+ // Copy
+ srcF, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ dstF, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ _, err = io.Copy(dstF, srcF)
+ return err
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_windows.go b/vendor/github.com/hashicorp/go-getter/get_file_windows.go
new file mode 100644
index 00000000..f87ed0a0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_file_windows.go
@@ -0,0 +1,120 @@
+// +build windows
+
+package getter
+
+import (
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+)
+
+func (g *FileGetter) Get(dst string, u *url.URL) error {
+ path := u.Path
+ if u.RawPath != "" {
+ path = u.RawPath
+ }
+
+ // The source path must exist and be a directory to be usable.
+ if fi, err := os.Stat(path); err != nil {
+ return fmt.Errorf("source path error: %s", err)
+ } else if !fi.IsDir() {
+ return fmt.Errorf("source path must be a directory")
+ }
+
+ fi, err := os.Lstat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ // If the destination already exists, it must be a symlink
+ if err == nil {
+ mode := fi.Mode()
+ if mode&os.ModeSymlink == 0 {
+ return fmt.Errorf("destination exists and is not a symlink")
+ }
+
+ // Remove the destination
+ if err := os.Remove(dst); err != nil {
+ return err
+ }
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ sourcePath := toBackslash(path)
+
+ // Use mklink to create a junction point
+ output, err := exec.Command("cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("failed to run mklink %v %v: %v %q", dst, sourcePath, err, output)
+ }
+
+ return nil
+}
+
+func (g *FileGetter) GetFile(dst string, u *url.URL) error {
+ path := u.Path
+ if u.RawPath != "" {
+ path = u.RawPath
+ }
+
+	// The source path must exist and be a file to be usable.
+ if fi, err := os.Stat(path); err != nil {
+ return fmt.Errorf("source path error: %s", err)
+ } else if fi.IsDir() {
+ return fmt.Errorf("source path must be a file")
+ }
+
+ _, err := os.Lstat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+	// If the destination already exists, remove it
+ if err == nil {
+ // Remove the destination
+ if err := os.Remove(dst); err != nil {
+ return err
+ }
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ // If we're not copying, just symlink and we're done
+ if !g.Copy {
+ return os.Symlink(path, dst)
+ }
+
+ // Copy
+ srcF, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ dstF, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ _, err = io.Copy(dstF, srcF)
+ return err
+}
+
+// toBackslash returns the result of replacing each slash character
+// in path with a backslash ('\') character. Multiple separators are
+// replaced by multiple backslashes.
+func toBackslash(path string) string {
+ return strings.Replace(path, "/", "\\", -1)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go
new file mode 100644
index 00000000..07281398
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_git.go
@@ -0,0 +1,225 @@
+package getter
+
+import (
+ "encoding/base64"
+ "fmt"
+ "io/ioutil"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ urlhelper "github.com/hashicorp/go-getter/helper/url"
+ "github.com/hashicorp/go-version"
+)
+
+// GitGetter is a Getter implementation that will download a module from
+// a git repository.
+type GitGetter struct{}
+
+func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) {
+ return ClientModeDir, nil
+}
+
+func (g *GitGetter) Get(dst string, u *url.URL) error {
+ if _, err := exec.LookPath("git"); err != nil {
+ return fmt.Errorf("git must be available and on the PATH")
+ }
+
+ // Extract some query parameters we use
+ var ref, sshKey string
+ q := u.Query()
+ if len(q) > 0 {
+ ref = q.Get("ref")
+ q.Del("ref")
+
+ sshKey = q.Get("sshkey")
+ q.Del("sshkey")
+
+ // Copy the URL
+ var newU url.URL = *u
+ u = &newU
+ u.RawQuery = q.Encode()
+ }
+
+ var sshKeyFile string
+ if sshKey != "" {
+ // Check that the git version is sufficiently new.
+ if err := checkGitVersion("2.3"); err != nil {
+ return fmt.Errorf("Error using ssh key: %v", err)
+ }
+
+ // We have an SSH key - decode it.
+ raw, err := base64.StdEncoding.DecodeString(sshKey)
+ if err != nil {
+ return err
+ }
+
+ // Create a temp file for the key and ensure it is removed.
+ fh, err := ioutil.TempFile("", "go-getter")
+ if err != nil {
+ return err
+ }
+ sshKeyFile = fh.Name()
+ defer os.Remove(sshKeyFile)
+
+ // Set the permissions prior to writing the key material.
+ if err := os.Chmod(sshKeyFile, 0600); err != nil {
+ return err
+ }
+
+ // Write the raw key into the temp file.
+ _, err = fh.Write(raw)
+ fh.Close()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Clone or update the repository
+ _, err := os.Stat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err == nil {
+ err = g.update(dst, sshKeyFile, ref)
+ } else {
+ err = g.clone(dst, sshKeyFile, u)
+ }
+ if err != nil {
+ return err
+ }
+
+	// Next: check out the proper tag/branch if it is specified
+ if ref != "" {
+ if err := g.checkout(dst, ref); err != nil {
+ return err
+ }
+ }
+
+ // Lastly, download any/all submodules.
+ return g.fetchSubmodules(dst, sshKeyFile)
+}
+
+// GetFile for Git doesn't support updating at this time. It will download
+// the file every time.
+func (g *GitGetter) GetFile(dst string, u *url.URL) error {
+ td, err := ioutil.TempDir("", "getter-git")
+ if err != nil {
+ return err
+ }
+ if err := os.RemoveAll(td); err != nil {
+ return err
+ }
+
+ // Get the filename, and strip the filename from the URL so we can
+ // just get the repository directly.
+ filename := filepath.Base(u.Path)
+ u.Path = filepath.Dir(u.Path)
+
+ // Get the full repository
+ if err := g.Get(td, u); err != nil {
+ return err
+ }
+
+ // Copy the single file
+ u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename)))
+ if err != nil {
+ return err
+ }
+
+ fg := &FileGetter{Copy: true}
+ return fg.GetFile(dst, u)
+}
+
+func (g *GitGetter) checkout(dst string, ref string) error {
+ cmd := exec.Command("git", "checkout", ref)
+ cmd.Dir = dst
+ return getRunCommand(cmd)
+}
+
+func (g *GitGetter) clone(dst, sshKeyFile string, u *url.URL) error {
+ cmd := exec.Command("git", "clone", u.String(), dst)
+ setupGitEnv(cmd, sshKeyFile)
+ return getRunCommand(cmd)
+}
+
+func (g *GitGetter) update(dst, sshKeyFile, ref string) error {
+ // Determine if we're a branch. If we're NOT a branch, then we just
+ // switch to master prior to checking out
+ cmd := exec.Command("git", "show-ref", "-q", "--verify", "refs/heads/"+ref)
+ cmd.Dir = dst
+
+ if getRunCommand(cmd) != nil {
+ // Not a branch, switch to master. This will also catch non-existent
+ // branches, in which case we want to switch to master and then
+ // checkout the proper branch later.
+ ref = "master"
+ }
+
+ // We have to be on a branch to pull
+ if err := g.checkout(dst, ref); err != nil {
+ return err
+ }
+
+ cmd = exec.Command("git", "pull", "--ff-only")
+ cmd.Dir = dst
+ setupGitEnv(cmd, sshKeyFile)
+ return getRunCommand(cmd)
+}
+
+// fetchSubmodules downloads any configured submodules recursively.
+func (g *GitGetter) fetchSubmodules(dst, sshKeyFile string) error {
+ cmd := exec.Command("git", "submodule", "update", "--init", "--recursive")
+ cmd.Dir = dst
+ setupGitEnv(cmd, sshKeyFile)
+ return getRunCommand(cmd)
+}
+
+// setupGitEnv sets up the environment for the given command. This is used to
+// pass configuration data to git and ssh and enables advanced cloning methods.
+func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) {
+ var sshOpts []string
+
+ if sshKeyFile != "" {
+ // We have an SSH key temp file configured, tell ssh about this.
+ sshOpts = append(sshOpts, "-i", sshKeyFile)
+ }
+
+ cmd.Env = append(os.Environ(),
+ // Set the ssh command to use for clones.
+ "GIT_SSH_COMMAND=ssh "+strings.Join(sshOpts, " "),
+ )
+}
+
+// checkGitVersion is used to check the version of git installed on the system
+// against a known minimum version. Returns an error if the installed version
+// is older than the given minimum.
+func checkGitVersion(min string) error {
+ want, err := version.NewVersion(min)
+ if err != nil {
+ return err
+ }
+
+ out, err := exec.Command("git", "version").Output()
+ if err != nil {
+ return err
+ }
+
+ fields := strings.Fields(string(out))
+ if len(fields) != 3 {
+ return fmt.Errorf("Unexpected 'git version' output: %q", string(out))
+ }
+
+ have, err := version.NewVersion(fields[2])
+ if err != nil {
+ return err
+ }
+
+ if have.LessThan(want) {
+ return fmt.Errorf("Required git version = %s, have %s", want, have)
+ }
+
+ return nil
+}
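
For orientation, here is a minimal sketch of driving this getter directly. It assumes the vendored package is imported as `github.com/hashicorp/go-getter`; the repository URL, ref, and destination path are made up for illustration.

```go
package main

import (
	"log"
	"net/url"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// The ref query parameter is stripped before cloning and checked out
	// afterwards; an sshkey parameter (a base64-encoded private key)
	// would be peeled off the same way.
	u, err := url.Parse("https://example.com/repo.git?ref=v1.0.0")
	if err != nil {
		log.Fatal(err)
	}

	g := new(getter.GitGetter)
	if err := g.Get("/tmp/repo-dst", u); err != nil {
		log.Fatal(err)
	}
}
```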
diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/get_hg.go
new file mode 100644
index 00000000..820bdd48
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_hg.go
@@ -0,0 +1,131 @@
+package getter
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+
+ urlhelper "github.com/hashicorp/go-getter/helper/url"
+)
+
+// HgGetter is a Getter implementation that will download a module from
+// a Mercurial repository.
+type HgGetter struct{}
+
+func (g *HgGetter) ClientMode(_ *url.URL) (ClientMode, error) {
+ return ClientModeDir, nil
+}
+
+func (g *HgGetter) Get(dst string, u *url.URL) error {
+ if _, err := exec.LookPath("hg"); err != nil {
+ return fmt.Errorf("hg must be available and on the PATH")
+ }
+
+ newURL, err := urlhelper.Parse(u.String())
+ if err != nil {
+ return err
+ }
+ if fixWindowsDrivePath(newURL) {
+ // See valid file path form on http://www.selenic.com/hg/help/urls
+ newURL.Path = fmt.Sprintf("/%s", newURL.Path)
+ }
+
+ // Extract some query parameters we use
+ var rev string
+ q := newURL.Query()
+ if len(q) > 0 {
+ rev = q.Get("rev")
+ q.Del("rev")
+
+ newURL.RawQuery = q.Encode()
+ }
+
+ _, err = os.Stat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err != nil {
+ if err := g.clone(dst, newURL); err != nil {
+ return err
+ }
+ }
+
+ if err := g.pull(dst, newURL); err != nil {
+ return err
+ }
+
+ return g.update(dst, newURL, rev)
+}
+
+// GetFile for Hg doesn't support updating at this time. It will download
+// the file every time.
+func (g *HgGetter) GetFile(dst string, u *url.URL) error {
+ td, err := ioutil.TempDir("", "getter-hg")
+ if err != nil {
+ return err
+ }
+ if err := os.RemoveAll(td); err != nil {
+ return err
+ }
+
+ // Get the filename, and strip the filename from the URL so we can
+ // just get the repository directly.
+ filename := filepath.Base(u.Path)
+ u.Path = filepath.ToSlash(filepath.Dir(u.Path))
+
+ // If we're on Windows, we need to set the host to "localhost" for hg
+ if runtime.GOOS == "windows" {
+ u.Host = "localhost"
+ }
+
+ // Get the full repository
+ if err := g.Get(td, u); err != nil {
+ return err
+ }
+
+ // Copy the single file
+ u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename)))
+ if err != nil {
+ return err
+ }
+
+ fg := &FileGetter{Copy: true}
+ return fg.GetFile(dst, u)
+}
+
+func (g *HgGetter) clone(dst string, u *url.URL) error {
+ cmd := exec.Command("hg", "clone", "-U", u.String(), dst)
+ return getRunCommand(cmd)
+}
+
+func (g *HgGetter) pull(dst string, u *url.URL) error {
+ cmd := exec.Command("hg", "pull")
+ cmd.Dir = dst
+ return getRunCommand(cmd)
+}
+
+func (g *HgGetter) update(dst string, u *url.URL, rev string) error {
+ args := []string{"update"}
+ if rev != "" {
+ args = append(args, rev)
+ }
+
+ cmd := exec.Command("hg", args...)
+ cmd.Dir = dst
+ return getRunCommand(cmd)
+}
+
+func fixWindowsDrivePath(u *url.URL) bool {
+ // hg assumes a file:/// prefix for Windows drive letter file paths.
+ // (e.g. file:///c:/foo/bar)
+ // If the URL Path does not begin with a '/' character, the resulting URL
+ // path will have a file:// prefix. (e.g. file://c:/foo/bar)
+ // See http://www.selenic.com/hg/help/urls and the examples listed in
+ // http://selenic.com/repo/hg-stable/file/1265a3a71d75/mercurial/util.py#l1936
+ return runtime.GOOS == "windows" && u.Scheme == "file" &&
+ len(u.Path) > 1 && u.Path[0] != '/' && u.Path[1] == ':'
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go
new file mode 100644
index 00000000..661d8989
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_http.go
@@ -0,0 +1,227 @@
+package getter
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// HttpGetter is a Getter implementation that will download from an HTTP
+// endpoint.
+//
+// For file downloads, HTTP is used directly.
+//
+// The protocol for downloading a directory from an HTTP endpoint is as follows:
+//
+// An HTTP GET request is made to the URL with the additional GET parameter
+// "terraform-get=1". This lets you handle that scenario specially if you
+// wish. The response must be a 2xx.
+//
+// First, the response is checked for an "X-Terraform-Get" header, which
+// should contain a source URL to download.
+//
+// If the header is not present, then a meta tag is searched for named
+// "terraform-get" and the content should be a source URL.
+//
+// The source URL, whether from the header or meta tag, must be a fully
+// formed URL. The shorthand syntax of "github.com/foo/bar" or relative
+// paths are not allowed.
+type HttpGetter struct {
+	// Netrc, if true, will look up and use auth information found
+ // in the user's netrc file if available.
+ Netrc bool
+}
+
+func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) {
+ if strings.HasSuffix(u.Path, "/") {
+ return ClientModeDir, nil
+ }
+ return ClientModeFile, nil
+}
+
+func (g *HttpGetter) Get(dst string, u *url.URL) error {
+ // Copy the URL so we can modify it
+ var newU url.URL = *u
+ u = &newU
+
+ if g.Netrc {
+ // Add auth from netrc if we can
+ if err := addAuthFromNetrc(u); err != nil {
+ return err
+ }
+ }
+
+ // Add terraform-get to the parameter.
+ q := u.Query()
+ q.Add("terraform-get", "1")
+ u.RawQuery = q.Encode()
+
+ // Get the URL
+ resp, err := http.Get(u.String())
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+ return fmt.Errorf("bad response code: %d", resp.StatusCode)
+ }
+
+ // Extract the source URL
+ var source string
+ if v := resp.Header.Get("X-Terraform-Get"); v != "" {
+ source = v
+ } else {
+ source, err = g.parseMeta(resp.Body)
+ if err != nil {
+ return err
+ }
+ }
+ if source == "" {
+ return fmt.Errorf("no source URL was returned")
+ }
+
+ // If there is a subdir component, then we download the root separately
+ // into a temporary directory, then copy over the proper subdir.
+ source, subDir := SourceDirSubdir(source)
+ if subDir == "" {
+ return Get(dst, source)
+ }
+
+ // We have a subdir, time to jump some hoops
+ return g.getSubdir(dst, source, subDir)
+}
+
+func (g *HttpGetter) GetFile(dst string, u *url.URL) error {
+ if g.Netrc {
+ // Add auth from netrc if we can
+ if err := addAuthFromNetrc(u); err != nil {
+ return err
+ }
+ }
+
+ resp, err := http.Get(u.String())
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ return fmt.Errorf("bad response code: %d", resp.StatusCode)
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ f, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ _, err = io.Copy(f, resp.Body)
+ return err
+}
+
+// getSubdir downloads the source into the destination, but with
+// the proper subdir.
+func (g *HttpGetter) getSubdir(dst, source, subDir string) error {
+ // Create a temporary directory to store the full source
+ td, err := ioutil.TempDir("", "tf")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(td)
+
+ // Download that into the given directory
+ if err := Get(td, source); err != nil {
+ return err
+ }
+
+ // Make sure the subdir path actually exists
+ sourcePath := filepath.Join(td, subDir)
+ if _, err := os.Stat(sourcePath); err != nil {
+ return fmt.Errorf(
+ "Error downloading %s: %s", source, err)
+ }
+
+ // Copy the subdirectory into our actual destination.
+ if err := os.RemoveAll(dst); err != nil {
+ return err
+ }
+
+ // Make the final destination
+ if err := os.MkdirAll(dst, 0755); err != nil {
+ return err
+ }
+
+ return copyDir(dst, sourcePath, false)
+}
+
+// parseMeta looks for the first meta tag in the given reader that
+// will give us the source URL.
+func (g *HttpGetter) parseMeta(r io.Reader) (string, error) {
+ d := xml.NewDecoder(r)
+ d.CharsetReader = charsetReader
+ d.Strict = false
+ var err error
+ var t xml.Token
+ for {
+ t, err = d.Token()
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ }
+ return "", err
+ }
+ if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") {
+ return "", nil
+ }
+ if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") {
+ return "", nil
+ }
+ e, ok := t.(xml.StartElement)
+ if !ok || !strings.EqualFold(e.Name.Local, "meta") {
+ continue
+ }
+ if attrValue(e.Attr, "name") != "terraform-get" {
+ continue
+ }
+ if f := attrValue(e.Attr, "content"); f != "" {
+ return f, nil
+ }
+ }
+}
+
+// attrValue returns the attribute value for the case-insensitive key
+// "name", or the empty string if nothing is found.
+func attrValue(attrs []xml.Attr, name string) string {
+ for _, a := range attrs {
+ if strings.EqualFold(a.Name.Local, name) {
+ return a.Value
+ }
+ }
+ return ""
+}
+
+// charsetReader returns a reader for the given charset. Currently
+// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful
+// error so the caller can see why the document could not be decoded when
+// the encoding is not supported. Note that, in order to reduce potential
+// errors, ASCII is treated as UTF-8 (i.e. characters greater than 0x7f
+// are not rejected).
+func charsetReader(charset string, input io.Reader) (io.Reader, error) {
+ switch strings.ToLower(charset) {
+ case "ascii":
+ return input, nil
+ default:
+ return nil, fmt.Errorf("can't decode XML document using charset %q", charset)
+ }
+}
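
The directory protocol described above is easiest to see from the server side. A hedged sketch of a compliant endpoint; the handler path and the source URL it answers with are hypothetical:

```go
package main

import (
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/modules/vpc", func(w http.ResponseWriter, r *http.Request) {
		// HttpGetter appends terraform-get=1 to the query string.
		if r.URL.Query().Get("terraform-get") == "1" {
			// Answer with a fully formed source URL; shorthand like
			// "github.com/foo/bar" is not accepted by the client.
			w.Header().Set("X-Terraform-Get", "https://example.com/archives/vpc.tar.gz")
		}
		w.WriteHeader(http.StatusOK) // any 2xx satisfies the client
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```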
diff --git a/vendor/github.com/hashicorp/go-getter/get_mock.go b/vendor/github.com/hashicorp/go-getter/get_mock.go
new file mode 100644
index 00000000..882e694d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_mock.go
@@ -0,0 +1,52 @@
+package getter
+
+import (
+ "net/url"
+)
+
+// MockGetter is an implementation of Getter that can be used for tests.
+type MockGetter struct {
+ // Proxy, if set, will be called after recording the calls below.
+ // If it isn't set, then the *Err values will be returned.
+ Proxy Getter
+
+ GetCalled bool
+ GetDst string
+ GetURL *url.URL
+ GetErr error
+
+ GetFileCalled bool
+ GetFileDst string
+ GetFileURL *url.URL
+ GetFileErr error
+}
+
+func (g *MockGetter) Get(dst string, u *url.URL) error {
+ g.GetCalled = true
+ g.GetDst = dst
+ g.GetURL = u
+
+ if g.Proxy != nil {
+ return g.Proxy.Get(dst, u)
+ }
+
+ return g.GetErr
+}
+
+func (g *MockGetter) GetFile(dst string, u *url.URL) error {
+ g.GetFileCalled = true
+ g.GetFileDst = dst
+ g.GetFileURL = u
+
+ if g.Proxy != nil {
+ return g.Proxy.GetFile(dst, u)
+ }
+ return g.GetFileErr
+}
+
+func (g *MockGetter) ClientMode(u *url.URL) (ClientMode, error) {
+ if l := len(u.Path); l > 0 && u.Path[l-1:] == "/" {
+ return ClientModeDir, nil
+ }
+ return ClientModeFile, nil
+}
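
A sketch of how MockGetter might be exercised in a test; since nothing here touches the network, the URL and destination are arbitrary. It assumes the test lives in package getter:

```go
package getter

import (
	"net/url"
	"testing"
)

func TestMockGetterRecordsCalls(t *testing.T) {
	g := new(MockGetter)
	u, err := url.Parse("https://example.com/foo")
	if err != nil {
		t.Fatal(err)
	}

	// With no Proxy and no GetErr set, Get records the call and returns nil.
	if err := g.Get("/tmp/dst", u); err != nil {
		t.Fatalf("err: %s", err)
	}
	if !g.GetCalled || g.GetDst != "/tmp/dst" || g.GetURL != u {
		t.Fatalf("call not recorded: %+v", g)
	}
}
```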
diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go
new file mode 100644
index 00000000..d3bffeb1
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_s3.go
@@ -0,0 +1,243 @@
+package getter
+
+import (
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+// S3Getter is a Getter implementation that will download a module from
+// an S3 bucket.
+type S3Getter struct{}
+
+func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) {
+ // Parse URL
+ region, bucket, path, _, creds, err := g.parseUrl(u)
+ if err != nil {
+ return 0, err
+ }
+
+ // Create client config
+ config := g.getAWSConfig(region, creds)
+ sess := session.New(config)
+ client := s3.New(sess)
+
+ // List the object(s) at the given prefix
+ req := &s3.ListObjectsInput{
+ Bucket: aws.String(bucket),
+ Prefix: aws.String(path),
+ }
+ resp, err := client.ListObjects(req)
+ if err != nil {
+ return 0, err
+ }
+
+ for _, o := range resp.Contents {
+ // Use file mode on exact match.
+ if *o.Key == path {
+ return ClientModeFile, nil
+ }
+
+ // Use dir mode if child keys are found.
+ if strings.HasPrefix(*o.Key, path+"/") {
+ return ClientModeDir, nil
+ }
+ }
+
+ // There was no match, so just return file mode. The download is going
+ // to fail but we will let S3 return the proper error later.
+ return ClientModeFile, nil
+}
+
+func (g *S3Getter) Get(dst string, u *url.URL) error {
+ // Parse URL
+ region, bucket, path, _, creds, err := g.parseUrl(u)
+ if err != nil {
+ return err
+ }
+
+ // Remove destination if it already exists
+ _, err = os.Stat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ if err == nil {
+ // Remove the destination
+ if err := os.RemoveAll(dst); err != nil {
+ return err
+ }
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ config := g.getAWSConfig(region, creds)
+ sess := session.New(config)
+ client := s3.New(sess)
+
+ // List files in path, keep listing until no more objects are found
+ lastMarker := ""
+ hasMore := true
+ for hasMore {
+ req := &s3.ListObjectsInput{
+ Bucket: aws.String(bucket),
+ Prefix: aws.String(path),
+ }
+ if lastMarker != "" {
+ req.Marker = aws.String(lastMarker)
+ }
+
+ resp, err := client.ListObjects(req)
+ if err != nil {
+ return err
+ }
+
+ hasMore = aws.BoolValue(resp.IsTruncated)
+
+ // Get each object storing each file relative to the destination path
+ for _, object := range resp.Contents {
+ lastMarker = aws.StringValue(object.Key)
+ objPath := aws.StringValue(object.Key)
+
+			// If the key ends with a slash, assume it is a directory and skip it
+ if strings.HasSuffix(objPath, "/") {
+ continue
+ }
+
+ // Get the object destination path
+ objDst, err := filepath.Rel(path, objPath)
+ if err != nil {
+ return err
+ }
+ objDst = filepath.Join(dst, objDst)
+
+ if err := g.getObject(client, objDst, bucket, objPath, ""); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (g *S3Getter) GetFile(dst string, u *url.URL) error {
+ region, bucket, path, version, creds, err := g.parseUrl(u)
+ if err != nil {
+ return err
+ }
+
+ config := g.getAWSConfig(region, creds)
+ sess := session.New(config)
+ client := s3.New(sess)
+ return g.getObject(client, dst, bucket, path, version)
+}
+
+func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) error {
+ req := &s3.GetObjectInput{
+ Bucket: aws.String(bucket),
+ Key: aws.String(key),
+ }
+ if version != "" {
+ req.VersionId = aws.String(version)
+ }
+
+ resp, err := client.GetObject(req)
+ if err != nil {
+ return err
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ f, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ _, err = io.Copy(f, resp.Body)
+ return err
+}
+
+func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *aws.Config {
+ conf := &aws.Config{}
+ if creds == nil {
+ // Grab the metadata URL
+ metadataURL := os.Getenv("AWS_METADATA_URL")
+ if metadataURL == "" {
+ metadataURL = "http://169.254.169.254:80/latest"
+ }
+
+ creds = credentials.NewChainCredentials(
+ []credentials.Provider{
+ &credentials.EnvProvider{},
+ &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+ &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.New(session.New(&aws.Config{
+ Endpoint: aws.String(metadataURL),
+ })),
+ },
+ })
+ }
+
+ conf.Credentials = creds
+ if region != "" {
+ conf.Region = aws.String(region)
+ }
+
+ return conf
+}
+
+func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) {
+	// Expected host style: s3.amazonaws.com. S3 hosts always have 3 parts,
+	// although the first may differ if we're accessing a specific region.
+ hostParts := strings.Split(u.Host, ".")
+ if len(hostParts) != 3 {
+ err = fmt.Errorf("URL is not a valid S3 URL")
+ return
+ }
+
+ // Parse the region out of the first part of the host
+ region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3")
+ if region == "" {
+ region = "us-east-1"
+ }
+
+ pathParts := strings.SplitN(u.Path, "/", 3)
+ if len(pathParts) != 3 {
+ err = fmt.Errorf("URL is not a valid S3 URL")
+ return
+ }
+
+ bucket = pathParts[1]
+ path = pathParts[2]
+ version = u.Query().Get("version")
+
+ _, hasAwsId := u.Query()["aws_access_key_id"]
+ _, hasAwsSecret := u.Query()["aws_access_key_secret"]
+ _, hasAwsToken := u.Query()["aws_access_token"]
+ if hasAwsId || hasAwsSecret || hasAwsToken {
+ creds = credentials.NewStaticCredentials(
+ u.Query().Get("aws_access_key_id"),
+ u.Query().Get("aws_access_key_secret"),
+ u.Query().Get("aws_access_token"),
+ )
+ }
+
+ return
+}
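
Because parseUrl is unexported, a sketch of the URL forms it accepts has to live in package getter; the bucket and key names below are hypothetical:

```go
package getter

import (
	"net/url"
	"testing"
)

func TestS3ParseUrlSketch(t *testing.T) {
	g := new(S3Getter)

	// Region-qualified host: the "s3-" prefix is trimmed off.
	u, _ := url.Parse("https://s3-eu-west-1.amazonaws.com/my-bucket/dir/key?version=3")
	region, bucket, path, version, _, err := g.parseUrl(u)
	if err != nil {
		t.Fatal(err)
	}
	if region != "eu-west-1" || bucket != "my-bucket" || path != "dir/key" || version != "3" {
		t.Fatalf("got %s %s %s %s", region, bucket, path, version)
	}

	// A bare s3.amazonaws.com host falls back to us-east-1.
	u, _ = url.Parse("https://s3.amazonaws.com/my-bucket/key")
	region, _, _, _, _, err = g.parseUrl(u)
	if err != nil || region != "us-east-1" {
		t.Fatalf("region = %q, err = %v", region, err)
	}
}
```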
diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url.go b/vendor/github.com/hashicorp/go-getter/helper/url/url.go
new file mode 100644
index 00000000..02497c25
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/helper/url/url.go
@@ -0,0 +1,14 @@
+package url
+
+import (
+ "net/url"
+)
+
+// Parse parses rawURL into a URL structure.
+// The rawURL may be relative or absolute.
+//
+// Parse is a wrapper for the Go stdlib net/url Parse function, but returns
+// Windows "safe" URLs on Windows platforms.
+func Parse(rawURL string) (*url.URL, error) {
+ return parse(rawURL)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go
new file mode 100644
index 00000000..ed1352a9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package url
+
+import (
+ "net/url"
+)
+
+func parse(rawURL string) (*url.URL, error) {
+ return url.Parse(rawURL)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go
new file mode 100644
index 00000000..4655226f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go
@@ -0,0 +1,40 @@
+package url
+
+import (
+ "fmt"
+ "net/url"
+ "path/filepath"
+ "strings"
+)
+
+func parse(rawURL string) (*url.URL, error) {
+ // Make sure we're using "/" since URLs are "/"-based.
+ rawURL = filepath.ToSlash(rawURL)
+
+ u, err := url.Parse(rawURL)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(rawURL) > 1 && rawURL[1] == ':' {
+ // Assume we're dealing with a drive letter file path where the drive
+ // letter has been parsed into the URL Scheme, and the rest of the path
+ // has been parsed into the URL Path without the leading ':' character.
+ u.Path = fmt.Sprintf("%s:%s", string(rawURL[0]), u.Path)
+ u.Scheme = ""
+ }
+
+ if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") {
+ // Assume we're dealing with a drive letter file path where the drive
+ // letter has been parsed into the URL Host.
+ u.Path = fmt.Sprintf("%s%s", u.Host, u.Path)
+ u.Host = ""
+ }
+
+ // Remove leading slash for absolute file paths.
+ if len(u.Path) > 2 && u.Path[0] == '/' && u.Path[2] == ':' {
+ u.Path = u.Path[1:]
+ }
+
+ return u, err
+}
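
The three normalizations above are easier to read as input/output pairs. A sketch with hypothetical paths; the annotated results apply on Windows, where this build of parse is selected:

```go
package main

import (
	"fmt"

	urlhelper "github.com/hashicorp/go-getter/helper/url"
)

func main() {
	for _, raw := range []string{
		`C:\Users\foo\mod`,     // drive letter parsed into the URL Scheme
		"file://C:/Users/foo",  // drive letter parsed into the URL Host
		"file:///C:/Users/foo", // leading slash in front of the drive letter
	} {
		u, err := urlhelper.Parse(raw)
		if err != nil {
			fmt.Println(raw, "->", err)
			continue
		}
		// On Windows each Path comes back in the C:/Users/foo... form.
		fmt.Println(raw, "->", u.Path)
	}
}
```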
diff --git a/vendor/github.com/hashicorp/go-getter/netrc.go b/vendor/github.com/hashicorp/go-getter/netrc.go
new file mode 100644
index 00000000..c7f6a3fb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/netrc.go
@@ -0,0 +1,67 @@
+package getter
+
+import (
+ "fmt"
+ "net/url"
+ "os"
+ "runtime"
+
+ "github.com/bgentry/go-netrc/netrc"
+ "github.com/mitchellh/go-homedir"
+)
+
+// addAuthFromNetrc adds auth information to the URL from the user's
+// netrc file if it can be found. This will only add the auth info
+// if the URL doesn't already have a username specified.
+func addAuthFromNetrc(u *url.URL) error {
+ // If the URL already has auth information, do nothing
+ if u.User != nil && u.User.Username() != "" {
+ return nil
+ }
+
+ // Get the netrc file path
+ path := os.Getenv("NETRC")
+ if path == "" {
+ filename := ".netrc"
+ if runtime.GOOS == "windows" {
+ filename = "_netrc"
+ }
+
+ var err error
+ path, err = homedir.Expand("~/" + filename)
+ if err != nil {
+ return err
+ }
+ }
+
+	// If the path doesn't point at a regular file, do nothing
+ if fi, err := os.Stat(path); err != nil {
+ // File doesn't exist, do nothing
+ if os.IsNotExist(err) {
+ return nil
+ }
+
+ // Some other error!
+ return err
+ } else if fi.IsDir() {
+ // File is directory, ignore
+ return nil
+ }
+
+ // Load up the netrc file
+ net, err := netrc.ParseFile(path)
+ if err != nil {
+ return fmt.Errorf("Error parsing netrc file at %q: %s", path, err)
+ }
+
+ machine := net.FindMachine(u.Host)
+ if machine == nil {
+ // Machine not found, no problem
+ return nil
+ }
+
+ // Set the user info
+ u.User = url.UserPassword(machine.Login, machine.Password)
+ return nil
+}
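
A sketch of how this hooks into the HTTP getter: setting Netrc causes addAuthFromNetrc to run before the request, and the NETRC environment variable overrides the default ~/.netrc (or _netrc on Windows) location. The file path and URL here are hypothetical:

```go
package main

import (
	"log"
	"net/url"
	"os"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// Point the lookup at a non-default netrc file.
	os.Setenv("NETRC", "/etc/custom-netrc")

	u, err := url.Parse("https://example.com/artifact.tgz")
	if err != nil {
		log.Fatal(err)
	}

	// Credentials for example.com, if present in the netrc file, are
	// attached to the URL before the download.
	g := &getter.HttpGetter{Netrc: true}
	if err := g.GetFile("/tmp/artifact.tgz", u); err != nil {
		log.Fatal(err)
	}
}
```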
diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go
new file mode 100644
index 00000000..4d5ee3cc
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/source.go
@@ -0,0 +1,36 @@
+package getter
+
+import (
+ "strings"
+)
+
+// SourceDirSubdir takes a source and returns a tuple of the URL without
+// the subdir and the URL with the subdir.
+func SourceDirSubdir(src string) (string, string) {
+	// Calculate an offset to avoid accidentally marking the scheme
+ // as the dir.
+ var offset int
+ if idx := strings.Index(src, "://"); idx > -1 {
+ offset = idx + 3
+ }
+
+ // First see if we even have an explicit subdir
+ idx := strings.Index(src[offset:], "//")
+ if idx == -1 {
+ return src, ""
+ }
+
+ idx += offset
+ subdir := src[idx+2:]
+ src = src[:idx]
+
+ // Next, check if we have query parameters and push them onto the
+ // URL.
+ if idx = strings.Index(subdir, "?"); idx > -1 {
+ query := subdir[idx:]
+ subdir = subdir[:idx]
+ src += query
+ }
+
+ return src, subdir
+}
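
A worked example of the split, with a hypothetical module address; note the query string is pushed back onto the source half:

```go
package main

import (
	"fmt"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	src, subDir := getter.SourceDirSubdir("github.com/foo/bar//modules/vpc?ref=v1.2.0")
	fmt.Println(src)    // github.com/foo/bar?ref=v1.2.0
	fmt.Println(subDir) // modules/vpc
}
```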
diff --git a/vendor/github.com/hashicorp/go-getter/storage.go b/vendor/github.com/hashicorp/go-getter/storage.go
new file mode 100644
index 00000000..2bc6b9ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/storage.go
@@ -0,0 +1,13 @@
+package getter
+
+// Storage is an interface that knows how to look up downloaded directories
+// as well as download and update directories from their sources into the
+// proper location.
+type Storage interface {
+ // Dir returns the directory on local disk where the directory source
+ // can be loaded from.
+ Dir(string) (string, bool, error)
+
+ // Get will download and optionally update the given directory.
+ Get(string, string, bool) error
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE
new file mode 100644
index 00000000..82b4de97
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/LICENSE
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md
new file mode 100644
index 00000000..ead5830f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/README.md
@@ -0,0 +1,97 @@
+# go-multierror
+
+[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis]
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[travis]: https://travis-ci.org/hashicorp/go-multierror
+[godocs]: https://godoc.org/github.com/hashicorp/go-multierror
+
+`go-multierror` is a package for Go that provides a mechanism for
+representing a list of `error` values as a single `error`.
+
+This allows a function in Go to return an `error` that might actually
+be a list of errors. If the caller knows this, they can unwrap the
+list and access the errors. If the caller doesn't know, the error
+formats to a nice human-readable format.
+
+`go-multierror` implements the
+[errwrap](https://github.com/hashicorp/errwrap) interface so that it can
+be used with that library, as well.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/go-multierror`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/go-multierror
+
+## Usage
+
+go-multierror is easy to use and purposely built to be unobtrusive in
+existing Go applications/libraries that may not be aware of it.
+
+**Building a list of errors**
+
+The `Append` function is used to create a list of errors. This function
+behaves a lot like the Go built-in `append` function: it doesn't matter
+if the first argument is nil, a `multierror.Error`, or any other `error`,
+the function behaves as you would expect.
+
+```go
+var result error
+
+if err := step1(); err != nil {
+ result = multierror.Append(result, err)
+}
+if err := step2(); err != nil {
+ result = multierror.Append(result, err)
+}
+
+return result
+```
+
+**Customizing the formatting of the errors**
+
+By specifying a custom `ErrorFormat`, you can customize the format
+of the `Error() string` function:
+
+```go
+var result *multierror.Error
+
+// ... accumulate errors here, maybe using Append
+
+if result != nil {
+ result.ErrorFormat = func([]error) string {
+ return "errors!"
+ }
+}
+```
+
+**Accessing the list of errors**
+
+`multierror.Error` implements `error` so if the caller doesn't know about
+multierror, it will work just fine. But if you're aware a multierror might
+be returned, you can use type switches to access the list of errors:
+
+```go
+if err := something(); err != nil {
+ if merr, ok := err.(*multierror.Error); ok {
+ // Use merr.Errors
+ }
+}
+```
+
+**Returning a multierror only if there are errors**
+
+If you build a `multierror.Error`, you can use the `ErrorOrNil` method
+to return an `error` implementation only if there are errors to return:
+
+```go
+var result *multierror.Error
+
+// ... accumulate errors here
+
+// Return the `error` only if errors were added to the multierror, otherwise
+// return nil since there are no errors.
+return result.ErrorOrNil()
+```
diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go
new file mode 100644
index 00000000..775b6e75
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/append.go
@@ -0,0 +1,41 @@
+package multierror
+
+// Append is a helper function that will append more errors
+// onto an Error in order to create a larger multi-error.
+//
+// If err is not a multierror.Error, then it will be turned into
+// one. If any of the errs are multierror.Error, they will be flattened
+// one level into err.
+func Append(err error, errs ...error) *Error {
+ switch err := err.(type) {
+ case *Error:
+ // Typed nils can reach here, so initialize if we are nil
+ if err == nil {
+ err = new(Error)
+ }
+
+ // Go through each error and flatten
+ for _, e := range errs {
+ switch e := e.(type) {
+ case *Error:
+ if e != nil {
+ err.Errors = append(err.Errors, e.Errors...)
+ }
+ default:
+ if e != nil {
+ err.Errors = append(err.Errors, e)
+ }
+ }
+ }
+
+ return err
+ default:
+ newErrs := make([]error, 0, len(errs)+1)
+ if err != nil {
+ newErrs = append(newErrs, err)
+ }
+ newErrs = append(newErrs, errs...)
+
+ return Append(&Error{}, newErrs...)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go
new file mode 100644
index 00000000..aab8e9ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/flatten.go
@@ -0,0 +1,26 @@
+package multierror
+
+// Flatten flattens the given error, merging any *Errors together into
+// a single *Error.
+func Flatten(err error) error {
+ // If it isn't an *Error, just return the error as-is
+ if _, ok := err.(*Error); !ok {
+ return err
+ }
+
+ // Otherwise, make the result and flatten away!
+ flatErr := new(Error)
+ flatten(err, flatErr)
+ return flatErr
+}
+
+func flatten(err error, flatErr *Error) {
+ switch err := err.(type) {
+ case *Error:
+ for _, e := range err.Errors {
+ flatten(e, flatErr)
+ }
+ default:
+ flatErr.Errors = append(flatErr.Errors, err)
+ }
+}
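
Append already flattens one level, so Flatten mostly matters for hand-built nesting. A minimal sketch:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	inner := &multierror.Error{Errors: []error{errors.New("a"), errors.New("b")}}
	outer := &multierror.Error{Errors: []error{errors.New("c"), inner}}

	flat := multierror.Flatten(outer)
	// The nested *Error is merged away, leaving c, a, b.
	fmt.Println(len(flat.(*multierror.Error).Errors)) // 3
}
```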
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
new file mode 100644
index 00000000..6c7a3cc9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/format.go
@@ -0,0 +1,27 @@
+package multierror
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ErrorFormatFunc is a function callback that is called by Error to
+// turn the list of errors into a string.
+type ErrorFormatFunc func([]error) string
+
+// ListFormatFunc is a basic formatter that outputs the number of errors
+// that occurred along with a bullet point list of the errors.
+func ListFormatFunc(es []error) string {
+ if len(es) == 1 {
+ return fmt.Sprintf("1 error occurred:\n\n* %s", es[0])
+ }
+
+ points := make([]string, len(es))
+ for i, err := range es {
+ points[i] = fmt.Sprintf("* %s", err)
+ }
+
+ return fmt.Sprintf(
+ "%d errors occurred:\n\n%s",
+ len(es), strings.Join(points, "\n"))
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
new file mode 100644
index 00000000..2ea08273
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/multierror.go
@@ -0,0 +1,51 @@
+package multierror
+
+import (
+ "fmt"
+)
+
+// Error is an error type that tracks multiple errors. It is used to
+// accumulate errors and return them as a single "error" value.
+type Error struct {
+ Errors []error
+ ErrorFormat ErrorFormatFunc
+}
+
+func (e *Error) Error() string {
+ fn := e.ErrorFormat
+ if fn == nil {
+ fn = ListFormatFunc
+ }
+
+ return fn(e.Errors)
+}
+
+// ErrorOrNil returns an error interface if this Error represents
+// a list of errors, or returns nil if the list of errors is empty. This
+// function is useful at the end of accumulation to make sure that the value
+// returned represents the existence of errors.
+func (e *Error) ErrorOrNil() error {
+ if e == nil {
+ return nil
+ }
+ if len(e.Errors) == 0 {
+ return nil
+ }
+
+ return e
+}
+
+func (e *Error) GoString() string {
+ return fmt.Sprintf("*%#v", *e)
+}
+
+// WrappedErrors returns the list of errors that this Error is wrapping.
+// It is an implementation of the errwrap.Wrapper interface so that
+// multierror.Error can be used with that library.
+//
+// This method is not safe to be called concurrently and is no different
+// than accessing the Errors field directly. It is implemented only to
+// satisfy the errwrap.Wrapper interface.
+func (e *Error) WrappedErrors() []error {
+ return e.Errors
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go
new file mode 100644
index 00000000..5c477abe
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/prefix.go
@@ -0,0 +1,37 @@
+package multierror
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/errwrap"
+)
+
+// Prefix is a helper function that will prefix some text
+// to the given error. If the error is a multierror.Error, then
+// it will be prefixed to each wrapped error.
+//
+// This is useful to use when appending multiple multierrors
+// together in order to give better scoping.
+func Prefix(err error, prefix string) error {
+ if err == nil {
+ return nil
+ }
+
+ format := fmt.Sprintf("%s {{err}}", prefix)
+ switch err := err.(type) {
+ case *Error:
+ // Typed nils can reach here, so initialize if we are nil
+ if err == nil {
+ err = new(Error)
+ }
+
+ // Wrap each of the errors
+ for i, e := range err.Errors {
+ err.Errors[i] = errwrap.Wrapf(format, e)
+ }
+
+ return err
+ default:
+ return errwrap.Wrapf(format, err)
+ }
+}
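
A sketch of Prefix scoping each wrapped error; the error messages are hypothetical:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	err := multierror.Append(nil, errors.New("bad port"), errors.New("bad host"))

	// Every wrapped error gets the same leading text.
	fmt.Println(multierror.Prefix(err, "config:"))
	// 2 errors occurred:
	//
	// * config: bad port
	// * config: bad host
}
```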
diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE
new file mode 100644
index 00000000..82b4de97
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/LICENSE
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md
new file mode 100644
index 00000000..28c63556
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/README.md
@@ -0,0 +1,160 @@
+# Go Plugin System over RPC
+
+`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system
+that has been in use by HashiCorp tooling for over 4 years. While initially
+created for [Packer](https://www.packer.io), it is additionally in use by
+[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and
+[Vault](https://www.vaultproject.io).
+
+While the plugin system is over RPC, it is currently only designed to work
+over a local [reliable] network. Plugins over a real network are not supported
+and will lead to unexpected behavior.
+
+This plugin system has been used on millions of machines across many different
+projects and has proven to be battle hardened and ready for production use.
+
+## Features
+
+The HashiCorp plugin system supports a number of features:
+
+**Plugins are Go interface implementations.** This makes writing and consuming
+plugins feel very natural. As a plugin author, you just implement an
+interface as if it were going to run in the same process. As a plugin user,
+you just call functions on an interface as if it were in the same process.
+This plugin system handles the communication in between.
+
+**Complex arguments and return values are supported.** This library
+provides APIs for handling complex arguments and return values such
+as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library
+(`MuxBroker`) for creating new connections between the client/server to
+serve additional interfaces or transfer raw data.
+
+**Bidirectional communication.** Because the plugin system supports
+complex arguments, the host process can send the plugin interface
+implementations and the plugin can call back into the host process.
+
+**Built-in Logging.** Any plugins that use the `log` standard library
+will have log data automatically sent to the host process. The host
+process will mirror this output prefixed with the path to the plugin
+binary. This makes debugging with plugins simple.
+
+**Protocol Versioning.** A very basic "protocol version" is supported that
+can be incremented to invalidate any previous plugins. This is useful when
+interface signatures are changing, protocol level changes are necessary,
+etc. When a protocol version is incompatible, a human friendly error
+message is shown to the end user.
+
+**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue
+to use stdout/stderr as usual and the output will get mirrored back to
+the host process. The host process can control which `io.Writer` these
+streams go to, including suppressing the mirroring entirely.
+
+**TTY Preservation.** Plugin subprocesses are connected to the same
+stdin file descriptor as the host process, allowing software that requires
+a TTY to work. For example, a plugin can execute `ssh`, and even though there
+are multiple subprocesses and RPC happening, it will look and act seamless
+to the end user.
+
+**Host upgrade while a plugin is running.** Plugins can be "reattached"
+so that the host process can be upgraded while the plugin is still running.
+This requires the host/plugin to know this is possible and daemonize
+properly. `NewClient` takes a `ReattachConfig` to determine if and how to
+reattach.
+
+**Cryptographically Secure Plugins.** Plugins can be verified with an expected
+checksum and RPC communications can be configured to use TLS. The host process
+must be properly secured to protect this configuration.
+
+## Architecture
+
+The HashiCorp plugin system works by launching subprocesses and communicating
+over RPC (using standard `net/rpc`). A single connection is made between
+any plugin and the host process, and we use a
+[connection multiplexing](https://github.com/hashicorp/yamux)
+library to multiplex any other connections on top.
+
+This architecture has a number of benefits:
+
+ * Plugins can't crash your host process: A panic in a plugin doesn't
+ panic the plugin user.
+
+ * Plugins are very easy to write: just write a Go application and `go build`.
+ Theoretically you could also use another language as long as it can
+ speak the Go `net/rpc` protocol, but this hasn't yet been tried.
+
+ * Plugins are very easy to install: just put the binary in a location where
+ the host will find it (depends on the host but this library also provides
+ helpers), and the plugin host handles the rest.
+
+ * Plugins can be relatively secure: The plugin only has access to the
+ interfaces and args given to it, not to the entire memory space of the
+ process. More security features are planned (see the coming soon section
+ below).
+
+## Usage
+
+To use the plugin system, you must take the following high-level steps.
+Examples are available in the `examples/` directory.
+
+ 1. Choose the interface(s) you want to expose for plugins.
+
+ 2. For each interface, write an implementation of that interface that
+ forwards every function call over an `*rpc.Client` (from the standard
+ `net/rpc` package) to the plugin. Likewise, implement the RPC server
+ struct it communicates with, which in turn delegates to a real,
+ concrete implementation.
+
+ 3. Create a `Plugin` implementation that knows how to create the RPC
+ client/server for a given plugin type.
+
+ 4. Plugin authors call `plugin.Serve` to serve a plugin from the
+ `main` function.
+
+ 5. Plugin users use `plugin.Client` to launch a subprocess and request
+ an interface implementation over RPC.
+
+That's it! In practice, step 2 is the most tedious and time-consuming step.
+Even so, it isn't very difficult, and you can see examples in the `examples/`
+directory, throughout our various open source projects, and in the sketch
+after the GoDoc link below.
+
+For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin).
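+
+As a concrete (and purely illustrative) sketch of steps 1-5, here is a
+hypothetical `Greeter` plugin. The `Greeter`, `GreeterRPC`,
+`GreeterRPCServer`, and `GreeterPlugin` names are invented for this README,
+not part of this library:
+
+```go
+package shared
+
+import (
+	"net/rpc"
+
+	"github.com/hashicorp/go-plugin"
+)
+
+// Step 1: the interface exposed for plugins.
+type Greeter interface {
+	Greet(name string) string
+}
+
+// Step 2 (client side): forward each call over the *rpc.Client.
+type GreeterRPC struct{ client *rpc.Client }
+
+func (g *GreeterRPC) Greet(name string) string {
+	var resp string
+	if err := g.client.Call("Plugin.Greet", name, &resp); err != nil {
+		panic(err) // a real implementation would handle this more gracefully
+	}
+	return resp
+}
+
+// Step 2 (server side): the RPC server that wraps a concrete implementation.
+type GreeterRPCServer struct{ Impl Greeter }
+
+func (s *GreeterRPCServer) Greet(name string, resp *string) error {
+	*resp = s.Impl.Greet(name)
+	return nil
+}
+
+// Step 3: a Plugin implementation that builds both halves.
+type GreeterPlugin struct{ Impl Greeter }
+
+func (p *GreeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
+	return &GreeterRPCServer{Impl: p.Impl}, nil
+}
+
+func (p *GreeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
+	return &GreeterRPC{client: c}, nil
+}
+```
+
+For steps 4 and 5, the plugin binary calls `plugin.Serve` with this plugin
+in its `Plugins` map, and the host calls `plugin.NewClient`, then `Client()`,
+then `Dispense("greeter")` to get back a value usable as a `Greeter`.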
+
+## Roadmap
+
+Our plugin system is constantly evolving. As we use the plugin system for
+new projects or for new features in existing projects, we constantly find
+improvements we can make.
+
+At this point in time, the roadmap for the plugin system is:
+
+**Semantic Versioning.** Plugins will be able to declare a semantic version.
+This plugin system will give host processes a way to constrain the versions
+they accept. This is in addition to the protocol versioning already present,
+which is meant for larger underlying changes.
+
+**Plugin fetching.** We will integrate with [go-getter](https://github.com/hashicorp/go-getter)
+to support automatic download + install of plugins. Paired with cryptographically
+secure plugins (above), we can make this a safe operation for an amazing
+user experience.
+
+## What About Shared Libraries?
+
+When we started using plugins (late 2012, early 2013), plugins over RPC
+were the only option since Go didn't support dynamic library loading. Today,
+Go still doesn't support dynamic library loading, though the Go team intends
+to add it. Since 2012, our plugin system has been stabilized by millions of
+users, and it has many benefits we've come to value greatly.
+
+For example, we intend to use this plugin system in
+[Vault](https://www.vaultproject.io), and dynamic library loading will
+simply never be acceptable in Vault for security reasons. That is an extreme
+example, but we believe our plugin system has more upsides than downsides
+compared to dynamic library loading, and since we've had it built and tested
+for years, we'll likely continue to use it.
+
+Shared libraries have one major advantage over our system: much higher
+performance. In real-world scenarios across our various tools, however,
+we've never needed more performance out of our plugin system, and it has
+sustained very high throughput, so this isn't a concern for us at the moment.
+
diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go
new file mode 100644
index 00000000..b69d41b2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/client.go
@@ -0,0 +1,666 @@
+package plugin
+
+import (
+ "bufio"
+ "crypto/subtle"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unicode"
+)
+
+// Killed is set to 1 once CleanupClients has been called. This can be used
+// by plugin RPC implementations to change error behavior, since network
+// connection errors are expected at that point. This should be
+// read using sync/atomic.
+var Killed uint32 = 0
+
+// This is a slice of the "managed" clients which are cleaned up when
+// calling Cleanup
+var managedClients = make([]*Client, 0, 5)
+var managedClientsLock sync.Mutex
+
+// Error types
+var (
+ // ErrProcessNotFound is returned when a client is instantiated to
+ // reattach to an existing process and it isn't found.
+ ErrProcessNotFound = errors.New("Reattachment process not found")
+
+ // ErrChecksumsDoNotMatch is returned when the binary's checksum doesn't
+ // match the one provided in the SecureConfig.
+ ErrChecksumsDoNotMatch = errors.New("checksums did not match")
+
+ // ErrSecureConfigNoChecksum is returned when an empty checksum is provided
+ // to the SecureConfig.
+ ErrSecureConfigNoChecksum = errors.New("no checksum provided")
+
+ // ErrSecureConfigNoHash is returned when a nil Hash object is provided to
+ // the SecureConfig.
+ ErrSecureConfigNoHash = errors.New("no hash implementation provided")
+
+ // ErrSecureConfigAndReattach is returned when both Reattach and
+ // SecureConfig are set.
+ ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set")
+)
+
+// Client handles the lifecycle of a plugin application. It launches
+// plugins, connects to them, dispenses interface implementations, and handles
+// killing the process.
+//
+// Plugin hosts should use one Client for each plugin executable. To
+// dispense a plugin type, use the `Client.Client` function, and then
+// call `Dispense`. This awkward API is mostly historical but is used to split
+// the client that deals with subprocess management and the client that
+// does RPC management.
+//
+// See NewClient and ClientConfig for using a Client.
+type Client struct {
+ config *ClientConfig
+ exited bool
+ doneLogging chan struct{}
+ l sync.Mutex
+ address net.Addr
+ process *os.Process
+ client *RPCClient
+}
+
+// ClientConfig is the configuration used to initialize a new
+// plugin client. After being used to initialize a plugin client,
+// that configuration must not be modified again.
+type ClientConfig struct {
+ // HandshakeConfig is the configuration that must match servers.
+ HandshakeConfig
+
+ // Plugins are the plugins that can be consumed.
+ Plugins map[string]Plugin
+
+ // One of the following must be set, but not both.
+ //
+ // Cmd is the unstarted subprocess for starting the plugin. If this is
+ // set, then the Client starts the plugin process on its own and connects
+ // to it.
+ //
+ // Reattach is configuration for reattaching to an existing plugin process
+ // that is already running. This isn't common.
+ Cmd *exec.Cmd
+ Reattach *ReattachConfig
+
+ // SecureConfig is configuration for verifying the integrity of the
+ // executable. It can not be used with Reattach.
+ SecureConfig *SecureConfig
+
+ // TLSConfig is used to enable TLS on the RPC client.
+ TLSConfig *tls.Config
+
+ // Managed represents if the client should be managed by the
+ // plugin package or not. If true, then by calling CleanupClients,
+ // it will automatically be cleaned up. Otherwise, the client
+ // user is fully responsible for making sure to Kill all plugin
+ // clients. By default the client is _not_ managed.
+ Managed bool
+
+ // The minimum and maximum port to use for communicating with
+ // the subprocess. If not set, this defaults to 10,000 and 25,000
+ // respectively.
+ MinPort, MaxPort uint
+
+ // StartTimeout is the timeout to wait for the plugin to say it
+ // has started successfully.
+ StartTimeout time.Duration
+
+ // If non-nil, then the stderr of the client will be written to here
+ // (as well as the log). This is the original os.Stderr of the subprocess.
+ // This isn't the output of synced stderr.
+ Stderr io.Writer
+
+ // SyncStdout, SyncStderr can be set to override the
+ // respective os.Std* values in the plugin. Care should be taken to
+ // avoid races here.
+ //
+ // If the default values (nil) are used, then this package will not
+ // sync any of these streams; the plugin's synced output is discarded.
+ SyncStdout io.Writer
+ SyncStderr io.Writer
+}
+
+// ReattachConfig is used to configure a client to reattach to an
+// already-running plugin process. You can retrieve this information by
+// calling ReattachConfig on Client.
+type ReattachConfig struct {
+ Addr net.Addr
+ Pid int
+}
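+
+// As an illustrative sketch (not a complete program): a host that wants to
+// survive its own upgrade can persist this config and reattach later with
+// something like the following, where plugins is a hypothetical plugin map
+// and the usual HandshakeConfig is also set:
+//
+//	rc := client.ReattachConfig() // save Addr and Pid somewhere durable
+//	// ...after the host restarts...
+//	c := NewClient(&ClientConfig{Reattach: rc, Plugins: plugins})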
+
+// SecureConfig is used to configure a client to verify the integrity of an
+// executable before running it. It does this by checking that the file's
+// checksum matches the expected value. Hash specifies the hashing method to
+// use when checksumming the file. The configuration is verified by the
+// client by calling the SecureConfig.Check() function.
+//
+// The host process should ensure the checksum was provided by a trusted and
+// authoritative source. The binary should be installed in such a way that it
+// can not be modified by an unauthorized user between the time of this check
+// and the time of execution.
+type SecureConfig struct {
+ Checksum []byte
+ Hash hash.Hash
+}
+
+// Check takes the filepath to an executable and returns true if the checksum of
+// the file matches the checksum provided in the SecureConfig.
+func (s *SecureConfig) Check(filePath string) (bool, error) {
+ if len(s.Checksum) == 0 {
+ return false, ErrSecureConfigNoChecksum
+ }
+
+ if s.Hash == nil {
+ return false, ErrSecureConfigNoHash
+ }
+
+ file, err := os.Open(filePath)
+ if err != nil {
+ return false, err
+ }
+ defer file.Close()
+
+ _, err = io.Copy(s.Hash, file)
+ if err != nil {
+ return false, err
+ }
+
+ sum := s.Hash.Sum(nil)
+
+ return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil
+}
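+
+// As an illustrative sketch (none of these names are defined here): a host
+// might enable checksum verification like so, assuming expectedSHA256 is a
+// hex digest obtained from a trusted source, alongside the usual
+// HandshakeConfig and Plugins fields:
+//
+//	sum, _ := hex.DecodeString(expectedSHA256)   // hypothetical trusted digest
+//	c := NewClient(&ClientConfig{
+//		Cmd:          exec.Command("./my-plugin"), // hypothetical binary
+//		SecureConfig: &SecureConfig{Checksum: sum, Hash: sha256.New()},
+//	})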
+
+// CleanupClients makes sure all the managed subprocesses are killed and
+// properly logged. This should be called before the parent process running
+// the plugins exits.
+//
+// This must only be called _once_.
+func CleanupClients() {
+ // Set the killed to true so that we don't get unexpected panics
+ atomic.StoreUint32(&Killed, 1)
+
+ // Kill all the managed clients in parallel and use a WaitGroup
+ // to wait for them all to finish up.
+ var wg sync.WaitGroup
+ managedClientsLock.Lock()
+ for _, client := range managedClients {
+ wg.Add(1)
+
+ go func(client *Client) {
+ client.Kill()
+ wg.Done()
+ }(client)
+ }
+ managedClientsLock.Unlock()
+
+ log.Println("[DEBUG] plugin: waiting for all plugin processes to complete...")
+ wg.Wait()
+}
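+
+// Hosts that use managed clients typically arrange this with a deferred
+// call in main (an illustrative sketch):
+//
+//	func main() {
+//		defer plugin.CleanupClients()
+//		// ... create managed clients and do work ...
+//	}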
+
+// NewClient creates a new plugin client which manages the lifecycle of an
+// external plugin and gets the address for the RPC connection.
+//
+// The client must be cleaned up at some point by calling Kill(). If
+// the client is a managed client (Managed set to true in the ClientConfig),
+// you can just call CleanupClients at the end of your program and it will
+// be properly cleaned up.
+func NewClient(config *ClientConfig) (c *Client) {
+ if config.MinPort == 0 && config.MaxPort == 0 {
+ config.MinPort = 10000
+ config.MaxPort = 25000
+ }
+
+ if config.StartTimeout == 0 {
+ config.StartTimeout = 1 * time.Minute
+ }
+
+ if config.Stderr == nil {
+ config.Stderr = ioutil.Discard
+ }
+
+ if config.SyncStdout == nil {
+ config.SyncStdout = ioutil.Discard
+ }
+ if config.SyncStderr == nil {
+ config.SyncStderr = ioutil.Discard
+ }
+
+ c = &Client{config: config}
+ if config.Managed {
+ managedClientsLock.Lock()
+ managedClients = append(managedClients, c)
+ managedClientsLock.Unlock()
+ }
+
+ return
+}
+
+// Client returns an RPC client for the plugin.
+//
+// Subsequent calls to this will return the same RPC client.
+func (c *Client) Client() (*RPCClient, error) {
+ addr, err := c.Start()
+ if err != nil {
+ return nil, err
+ }
+
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.client != nil {
+ return c.client, nil
+ }
+
+ // Connect to the client
+ conn, err := net.Dial(addr.Network(), addr.String())
+ if err != nil {
+ return nil, err
+ }
+ if tcpConn, ok := conn.(*net.TCPConn); ok {
+ // Make sure to set keep alive so that the connection doesn't die
+ tcpConn.SetKeepAlive(true)
+ }
+
+ if c.config.TLSConfig != nil {
+ conn = tls.Client(conn, c.config.TLSConfig)
+ }
+
+ // Create the actual RPC client
+ c.client, err = NewRPCClient(conn, c.config.Plugins)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ // Begin the stream syncing so that stdin, out, err work properly
+ err = c.client.SyncStreams(
+ c.config.SyncStdout,
+ c.config.SyncStderr)
+ if err != nil {
+ c.client.Close()
+ c.client = nil
+ return nil, err
+ }
+
+ return c.client, nil
+}
+
+// Exited tells whether or not the underlying process has exited.
+func (c *Client) Exited() bool {
+ c.l.Lock()
+ defer c.l.Unlock()
+ return c.exited
+}
+
+// Kill ends the executing subprocess (if it is running) and performs any
+// cleanup tasks necessary, such as capturing any remaining logs.
+//
+// This method blocks until the process successfully exits.
+//
+// This method can safely be called multiple times.
+func (c *Client) Kill() {
+ // Grab a lock to read some private fields.
+ c.l.Lock()
+ process := c.process
+ addr := c.address
+ doneCh := c.doneLogging
+ c.l.Unlock()
+
+ // If there is no process, we never started anything. Nothing to kill.
+ if process == nil {
+ return
+ }
+
+ // We need to check for address here. It is possible that the plugin
+ // started (process != nil) but has no address (addr == nil) if the
+ // plugin failed at startup. If we do have an address, we need to close
+ // the plugin net connections.
+ graceful := false
+ if addr != nil {
+ // Close the client to cleanly exit the process.
+ client, err := c.Client()
+ if err == nil {
+ err = client.Close()
+
+ // If there is no error, then we attempt to wait for a graceful
+ // exit. If there was an error, we assume that graceful cleanup
+ // won't happen and just force kill.
+ graceful = err == nil
+ if err != nil {
+ // If there was an error just log it. We're going to force
+ // kill in a moment anyways.
+ log.Printf(
+ "[WARN] plugin: error closing client during Kill: %s", err)
+ }
+ }
+ }
+
+ // If we're attempting a graceful exit, then we wait for a short period
+ // of time to allow that to happen. To wait for this we just wait on the
+ // doneCh which would be closed if the process exits.
+ if graceful {
+ select {
+ case <-doneCh:
+ return
+ case <-time.After(250 * time.Millisecond):
+ }
+ }
+
+ // If graceful exiting failed, just kill it
+ process.Kill()
+
+ // Wait for the client to finish logging so we have a complete log
+ <-doneCh
+}
+
+// Start starts the underlying subprocess, communicating with it to negotiate
+// a port for RPC connections, and returns the address to connect to via RPC.
+//
+// This method is safe to call multiple times. Subsequent calls have no effect.
+// Once a client has been started once, it cannot be started again, even if
+// it was killed.
+func (c *Client) Start() (addr net.Addr, err error) {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.address != nil {
+ return c.address, nil
+ }
+
+ // Exactly one of Cmd or Reattach must be set; anything else is an
+ // error. We wrap this in a {} for scoping reasons, hoping that escape
+ // analysis will pop the stack here.
+ {
+ cmdSet := c.config.Cmd != nil
+ attachSet := c.config.Reattach != nil
+ secureSet := c.config.SecureConfig != nil
+ if cmdSet == attachSet {
+ return nil, fmt.Errorf("Only one of Cmd or Reattach must be set")
+ }
+
+ if secureSet && attachSet {
+ return nil, ErrSecureConfigAndReattach
+ }
+ }
+
+ // Create the logging channel for when we kill
+ c.doneLogging = make(chan struct{})
+
+ if c.config.Reattach != nil {
+ // Verify the process still exists. If not, then it is an error
+ p, err := os.FindProcess(c.config.Reattach.Pid)
+ if err != nil {
+ return nil, err
+ }
+
+ // Attempt to connect to the addr since on Unix systems FindProcess
+ // doesn't actually return an error if it can't find the process.
+ conn, err := net.Dial(
+ c.config.Reattach.Addr.Network(),
+ c.config.Reattach.Addr.String())
+ if err != nil {
+ p.Kill()
+ return nil, ErrProcessNotFound
+ }
+ conn.Close()
+
+ // Goroutine to mark exit status
+ go func(pid int) {
+ // Wait for the process to die
+ pidWait(pid)
+
+ // Log so we can see it
+ log.Printf("[DEBUG] plugin: reattached plugin process exited\n")
+
+ // Mark it
+ c.l.Lock()
+ defer c.l.Unlock()
+ c.exited = true
+
+ // Close the logging channel since that doesn't work on reattach
+ close(c.doneLogging)
+ }(p.Pid)
+
+ // Set the address and process
+ c.address = c.config.Reattach.Addr
+ c.process = p
+
+ return c.address, nil
+ }
+
+ env := []string{
+ fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue),
+ fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort),
+ fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort),
+ }
+
+ stdout_r, stdout_w := io.Pipe()
+ stderr_r, stderr_w := io.Pipe()
+
+ cmd := c.config.Cmd
+ cmd.Env = append(cmd.Env, os.Environ()...)
+ cmd.Env = append(cmd.Env, env...)
+ cmd.Stdin = os.Stdin
+ cmd.Stderr = stderr_w
+ cmd.Stdout = stdout_w
+
+ if c.config.SecureConfig != nil {
+ if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil {
+ return nil, fmt.Errorf("error verifying checksum: %s", err)
+ } else if !ok {
+ return nil, ErrChecksumsDoNotMatch
+ }
+ }
+
+ log.Printf("[DEBUG] plugin: starting plugin: %s %#v", cmd.Path, cmd.Args)
+ err = cmd.Start()
+ if err != nil {
+ return
+ }
+
+ // Set the process
+ c.process = cmd.Process
+
+ // Make sure the command is properly cleaned up if there is an error
+ defer func() {
+ r := recover()
+
+ if err != nil || r != nil {
+ cmd.Process.Kill()
+ }
+
+ if r != nil {
+ panic(r)
+ }
+ }()
+
+ // Start goroutine to wait for process to exit
+ exitCh := make(chan struct{})
+ go func() {
+ // Make sure we close the write end of our stderr/stdout so
+ // that the readers see EOF properly.
+ defer stderr_w.Close()
+ defer stdout_w.Close()
+
+ // Wait for the command to end.
+ cmd.Wait()
+
+ // Log and make sure to flush the logs right away
+ log.Printf("[DEBUG] plugin: %s: plugin process exited\n", cmd.Path)
+ os.Stderr.Sync()
+
+ // Mark that we exited
+ close(exitCh)
+
+ // Set that we exited, which takes a lock
+ c.l.Lock()
+ defer c.l.Unlock()
+ c.exited = true
+ }()
+
+ // Start goroutine that logs the stderr
+ go c.logStderr(stderr_r)
+
+ // Start a goroutine that reads lines out of stdout
+ linesCh := make(chan []byte)
+ go func() {
+ defer close(linesCh)
+
+ buf := bufio.NewReader(stdout_r)
+ for {
+ line, err := buf.ReadBytes('\n')
+ if line != nil {
+ linesCh <- line
+ }
+
+ if err == io.EOF {
+ return
+ }
+ }
+ }()
+
+ // Make sure that after we return we keep draining stdout forever
+ // so writes don't block, since it is an io.Pipe
+ defer func() {
+ go func() {
+ for range linesCh {
+ }
+ }()
+ }()
+
+ // Some channels for the next step
+ timeout := time.After(c.config.StartTimeout)
+
+ // Start looking for the address
+ log.Printf("[DEBUG] plugin: waiting for RPC address for: %s", cmd.Path)
+ select {
+ case <-timeout:
+ err = errors.New("timeout while waiting for plugin to start")
+ case <-exitCh:
+ err = errors.New("plugin exited before we could connect")
+ case lineBytes := <-linesCh:
+ // Trim the line and split by "|" in order to get the parts of
+ // the output.
+ line := strings.TrimSpace(string(lineBytes))
+ parts := strings.SplitN(line, "|", 4)
+ if len(parts) < 4 {
+ err = fmt.Errorf(
+ "Unrecognized remote plugin message: %s\n\n"+
+ "This usually means that the plugin is either invalid or simply\n"+
+ "needs to be recompiled to support the latest protocol.", line)
+ return
+ }
+
+ // Check the core protocol. Wrapped in a {} for scoping.
+ {
+ var coreProtocol int64
+ coreProtocol, err = strconv.ParseInt(parts[0], 10, 0)
+ if err != nil {
+ err = fmt.Errorf("Error parsing core protocol version: %s", err)
+ return
+ }
+
+ if int(coreProtocol) != CoreProtocolVersion {
+ err = fmt.Errorf("Incompatible core API version with plugin. "+
+ "Plugin version: %s, Ours: %d\n\n"+
+ "To fix this, the plugin usually only needs to be recompiled.\n"+
+ "Please report this to the plugin author.", parts[0], CoreProtocolVersion)
+ return
+ }
+ }
+
+ // Parse the protocol version
+ var protocol int64
+ protocol, err = strconv.ParseInt(parts[1], 10, 0)
+ if err != nil {
+ err = fmt.Errorf("Error parsing protocol version: %s", err)
+ return
+ }
+
+ // Test the API version
+ if uint(protocol) != c.config.ProtocolVersion {
+ err = fmt.Errorf("Incompatible API version with plugin. "+
+ "Plugin version: %s, Ours: %d", parts[1], c.config.ProtocolVersion)
+ return
+ }
+
+ switch parts[2] {
+ case "tcp":
+ addr, err = net.ResolveTCPAddr("tcp", parts[3])
+ case "unix":
+ addr, err = net.ResolveUnixAddr("unix", parts[3])
+ default:
+ err = fmt.Errorf("Unknown address type: %s", parts[3])
+ }
+ }
+
+ c.address = addr
+ return
+}
+
+// ReattachConfig returns the information that must be provided to NewClient
+// to reattach to the plugin process that this client started. This is
+// useful for plugins that detach from their parent process.
+//
+// If this returns nil then the process hasn't been started yet. Please
+// call Start or Client before calling this.
+func (c *Client) ReattachConfig() *ReattachConfig {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.address == nil {
+ return nil
+ }
+
+ if c.config.Cmd != nil && c.config.Cmd.Process == nil {
+ return nil
+ }
+
+ // If we connected via reattach, just return the information as-is
+ if c.config.Reattach != nil {
+ return c.config.Reattach
+ }
+
+ return &ReattachConfig{
+ Addr: c.address,
+ Pid: c.config.Cmd.Process.Pid,
+ }
+}
+
+func (c *Client) logStderr(r io.Reader) {
+ bufR := bufio.NewReader(r)
+ for {
+ line, err := bufR.ReadString('\n')
+ if line != "" {
+ c.config.Stderr.Write([]byte(line))
+
+ line = strings.TrimRightFunc(line, unicode.IsSpace)
+ log.Printf("[DEBUG] plugin: %s: %s", filepath.Base(c.config.Cmd.Path), line)
+ }
+
+ if err == io.EOF {
+ break
+ }
+ }
+
+ // Flag that we've completed logging for others
+ close(c.doneLogging)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go
new file mode 100644
index 00000000..d22c566e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/discover.go
@@ -0,0 +1,28 @@
+package plugin
+
+import (
+ "path/filepath"
+)
+
+// Discover discovers plugins that are in a given directory.
+//
+// The directory doesn't need to be absolute. For example, "." will work fine.
+//
+// This currently assumes any file matching the glob is a plugin.
+// In the future this may be smarter about checking that a file is
+// executable and so on.
+//
+// TODO: test
+func Discover(glob, dir string) ([]string, error) {
+ var err error
+
+ // Make the directory absolute if it isn't already
+ if !filepath.IsAbs(dir) {
+ dir, err = filepath.Abs(dir)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return filepath.Glob(filepath.Join(dir, glob))
+}
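+
+// For example (illustrative path and glob only):
+//
+//	paths, err := plugin.Discover("myapp-plugin-*", "/usr/local/bin")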
diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go
new file mode 100644
index 00000000..22a7baa6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/error.go
@@ -0,0 +1,24 @@
+package plugin
+
+// BasicError wraps error values so that they can be messaged across RPC
+// channels. Since "error" is an interface, we can't always gob-encode the
+// underlying structure. This is a valid error interface implementer that we
+// push across instead.
+type BasicError struct {
+ Message string
+}
+
+// NewBasicError is used to create a BasicError.
+//
+// err is allowed to be nil.
+func NewBasicError(err error) *BasicError {
+ if err == nil {
+ return nil
+ }
+
+ return &BasicError{err.Error()}
+}
+
+func (e *BasicError) Error() string {
+ return e.Message
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go
new file mode 100644
index 00000000..01c45ad7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/mux_broker.go
@@ -0,0 +1,204 @@
+package plugin
+
+import (
+ "encoding/binary"
+ "fmt"
+ "log"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/hashicorp/yamux"
+)
+
+// MuxBroker is responsible for brokering multiplexed connections by unique ID.
+//
+// It is used by plugins to multiplex multiple RPC connections and data
+// streams on top of a single connection between the plugin process and the
+// host process.
+//
+// This allows a plugin to request a channel with a specific ID to connect to
+// or accept a connection from, and the broker handles the details of
+// holding these channels open while they're being negotiated.
+//
+// The Plugin interface has access to these for both Server and Client.
+// The broker can be used by either (optionally) to reserve and connect to
+// new multiplexed streams. This is useful for complex args and return values,
+// or anything else you might need a data stream for.
+type MuxBroker struct {
+ nextId uint32
+ session *yamux.Session
+ streams map[uint32]*muxBrokerPending
+
+ sync.Mutex
+}
+
+type muxBrokerPending struct {
+ ch chan net.Conn
+ doneCh chan struct{}
+}
+
+func newMuxBroker(s *yamux.Session) *MuxBroker {
+ return &MuxBroker{
+ session: s,
+ streams: make(map[uint32]*muxBrokerPending),
+ }
+}
+
+// Accept accepts a connection by ID.
+//
+// This should not be called multiple times with the same ID at one time.
+func (m *MuxBroker) Accept(id uint32) (net.Conn, error) {
+ var c net.Conn
+ p := m.getStream(id)
+ select {
+ case c = <-p.ch:
+ close(p.doneCh)
+ case <-time.After(5 * time.Second):
+ m.Lock()
+ defer m.Unlock()
+ delete(m.streams, id)
+
+ return nil, fmt.Errorf("timeout waiting for accept")
+ }
+
+ // Ack our connection
+ if err := binary.Write(c, binary.LittleEndian, id); err != nil {
+ c.Close()
+ return nil, err
+ }
+
+ return c, nil
+}
+
+// AcceptAndServe is used to accept a specific stream ID and immediately
+// serve an RPC server on that stream ID. This is used to easily serve
+// complex arguments.
+//
+// The served interface is always registered to the "Plugin" name.
+func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) {
+ conn, err := m.Accept(id)
+ if err != nil {
+ log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err)
+ return
+ }
+
+ serve(conn, "Plugin", v)
+}
+
+// Close closes the connection and all sub-connections.
+func (m *MuxBroker) Close() error {
+ return m.session.Close()
+}
+
+// Dial opens a connection by ID.
+func (m *MuxBroker) Dial(id uint32) (net.Conn, error) {
+ // Open the stream
+ stream, err := m.session.OpenStream()
+ if err != nil {
+ return nil, err
+ }
+
+ // Write the stream ID onto the wire.
+ if err := binary.Write(stream, binary.LittleEndian, id); err != nil {
+ stream.Close()
+ return nil, err
+ }
+
+ // Read the ack that we connected. Then we're off!
+ var ack uint32
+ if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil {
+ stream.Close()
+ return nil, err
+ }
+ if ack != id {
+ stream.Close()
+ return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id)
+ }
+
+ return stream, nil
+}
+
+// NextId returns a unique ID to use next.
+//
+// It is possible for very long-running plugin hosts to wrap this value,
+// though it would require a very large number of RPC calls. In practice
+// we've never seen it happen.
+func (m *MuxBroker) NextId() uint32 {
+ return atomic.AddUint32(&m.nextId, 1)
+}
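+
+// A typical exchange looks like this sketch, where impl is a hypothetical
+// RPC receiver and the ID is passed to the other side as an argument of an
+// existing RPC call:
+//
+//	// dispensing side: reserve an ID and serve an implementation on it
+//	id := broker.NextId()
+//	go broker.AcceptAndServe(id, impl)
+//
+//	// consuming side: dial the same ID to get a dedicated connection
+//	conn, err := broker.Dial(id)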
+
+// Run starts the brokering and should be executed in a goroutine, since it
+// blocks forever, or until the session closes.
+//
+// Users of MuxBroker never need to call this. It is called internally by
+// the plugin host/client.
+func (m *MuxBroker) Run() {
+ for {
+ stream, err := m.session.AcceptStream()
+ if err != nil {
+ // Once we receive an error, just exit
+ break
+ }
+
+ // Read the stream ID from the stream
+ var id uint32
+ if err := binary.Read(stream, binary.LittleEndian, &id); err != nil {
+ stream.Close()
+ continue
+ }
+
+ // Initialize the waiter
+ p := m.getStream(id)
+ select {
+ case p.ch <- stream:
+ default:
+ }
+
+ // Wait for a timeout
+ go m.timeoutWait(id, p)
+ }
+}
+
+func (m *MuxBroker) getStream(id uint32) *muxBrokerPending {
+ m.Lock()
+ defer m.Unlock()
+
+ p, ok := m.streams[id]
+ if ok {
+ return p
+ }
+
+ m.streams[id] = &muxBrokerPending{
+ ch: make(chan net.Conn, 1),
+ doneCh: make(chan struct{}),
+ }
+ return m.streams[id]
+}
+
+func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) {
+ // Wait for the stream to either be picked up and connected, or
+ // for a timeout.
+ timeout := false
+ select {
+ case <-p.doneCh:
+ case <-time.After(5 * time.Second):
+ timeout = true
+ }
+
+ m.Lock()
+ defer m.Unlock()
+
+ // Delete the stream so no one else can grab it
+ delete(m.streams, id)
+
+ // If we timed out, then check if we have a channel in the buffer,
+ // and if so, close it.
+ if timeout {
+ select {
+ case s := <-p.ch:
+ s.Close()
+ default:
+ // nothing was buffered; don't block forever
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go
new file mode 100644
index 00000000..37c8fd65
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/plugin.go
@@ -0,0 +1,25 @@
+// The plugin package exposes functions and helpers for communicating with
+// plugins which are implemented as standalone binary applications.
+//
+// plugin.Client fully manages the lifecycle of executing the application,
+// connecting to it, and returning the RPC client for dispensing plugins.
+//
+// plugin.Serve fully manages listeners to expose an RPC server from a binary
+// that plugin.Client can connect to.
+package plugin
+
+import (
+ "net/rpc"
+)
+
+// Plugin is the interface that is implemented to serve/connect to an
+// interface implementation.
+type Plugin interface {
+ // Server should return the RPC server compatible struct to serve
+ // the methods that the Client calls over net/rpc.
+ Server(*MuxBroker) (interface{}, error)
+
+ // Client returns an interface implementation for the plugin you're
+ // serving that communicates to the server end of the plugin.
+ Client(*MuxBroker, *rpc.Client) (interface{}, error)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go
new file mode 100644
index 00000000..88c999a5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/process.go
@@ -0,0 +1,24 @@
+package plugin
+
+import (
+ "time"
+)
+
+// pidAlive checks whether a pid is alive.
+func pidAlive(pid int) bool {
+ return _pidAlive(pid)
+}
+
+// pidWait blocks for a process to exit.
+func pidWait(pid int) error {
+ ticker := time.NewTicker(1 * time.Second)
+ defer ticker.Stop()
+
+ for range ticker.C {
+ if !pidAlive(pid) {
+ break
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go
new file mode 100644
index 00000000..70ba546b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/process_posix.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package plugin
+
+import (
+ "os"
+ "syscall"
+)
+
+// _pidAlive tests whether a process is alive or not by sending it Signal 0,
+// since Go otherwise has no way to test this.
+func _pidAlive(pid int) bool {
+ proc, err := os.FindProcess(pid)
+ if err == nil {
+ err = proc.Signal(syscall.Signal(0))
+ }
+
+ return err == nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go
new file mode 100644
index 00000000..9f7b0180
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/process_windows.go
@@ -0,0 +1,29 @@
+package plugin
+
+import (
+ "syscall"
+)
+
+const (
+ // Weird name but matches the MSDN docs
+ exit_STILL_ACTIVE = 259
+
+ processDesiredAccess = syscall.STANDARD_RIGHTS_READ |
+ syscall.PROCESS_QUERY_INFORMATION |
+ syscall.SYNCHRONIZE
+)
+
+// _pidAlive tests whether a process is alive or not
+func _pidAlive(pid int) bool {
+ h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid))
+ if err != nil {
+ return false
+ }
+
+ var ec uint32
+ if e := syscall.GetExitCodeProcess(h, &ec); e != nil {
+ return false
+ }
+
+ return ec == exit_STILL_ACTIVE
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go
new file mode 100644
index 00000000..29f9bf06
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go
@@ -0,0 +1,123 @@
+package plugin
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "net/rpc"
+
+ "github.com/hashicorp/yamux"
+)
+
+// RPCClient connects to an RPCServer over net/rpc to dispense plugin types.
+type RPCClient struct {
+ broker *MuxBroker
+ control *rpc.Client
+ plugins map[string]Plugin
+
+ // These are the streams used for the various stdout/err overrides
+ stdout, stderr net.Conn
+}
+
+// NewRPCClient creates a client from an already-open connection-like value.
+// Dial is typically used instead.
+func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) {
+ // Create the yamux client so we can multiplex
+ mux, err := yamux.Client(conn, nil)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ // Connect to the control stream.
+ control, err := mux.Open()
+ if err != nil {
+ mux.Close()
+ return nil, err
+ }
+
+ // Connect stdout, stderr streams
+ stdstream := make([]net.Conn, 2)
+ for i := range stdstream {
+ stdstream[i], err = mux.Open()
+ if err != nil {
+ mux.Close()
+ return nil, err
+ }
+ }
+
+ // Create the broker and start it up
+ broker := newMuxBroker(mux)
+ go broker.Run()
+
+ // Build the client using our broker and control channel.
+ return &RPCClient{
+ broker: broker,
+ control: rpc.NewClient(control),
+ plugins: plugins,
+ stdout: stdstream[0],
+ stderr: stdstream[1],
+ }, nil
+}
+
+// SyncStreams should be called to enable syncing of stdout,
+// stderr with the plugin.
+//
+// This will return immediately and the syncing will continue to happen
+// in the background. You do not need to launch this in a goroutine yourself.
+//
+// This should never be called multiple times.
+func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error {
+ go copyStream("stdout", stdout, c.stdout)
+ go copyStream("stderr", stderr, c.stderr)
+ return nil
+}
+
+// Close closes the connection. The client is no longer usable after this
+// is called.
+func (c *RPCClient) Close() error {
+ // Call the control channel and ask it to gracefully exit. If this
+ // errors, then we save it so that we always return an error but we
+ // want to try to close the other channels anyways.
+ var empty struct{}
+ returnErr := c.control.Call("Control.Quit", true, &empty)
+
+ // Close the other streams we have
+ if err := c.control.Close(); err != nil {
+ return err
+ }
+ if err := c.stdout.Close(); err != nil {
+ return err
+ }
+ if err := c.stderr.Close(); err != nil {
+ return err
+ }
+ if err := c.broker.Close(); err != nil {
+ return err
+ }
+
+ // Return the error we got from Control.Quit. This is very important
+ // since we MUST return non-nil error if this fails so that Client.Kill
+ // will properly try a process.Kill.
+ return returnErr
+}
+
+func (c *RPCClient) Dispense(name string) (interface{}, error) {
+ p, ok := c.plugins[name]
+ if !ok {
+ return nil, fmt.Errorf("unknown plugin type: %s", name)
+ }
+
+ var id uint32
+ if err := c.control.Call(
+ "Dispenser.Dispense", name, &id); err != nil {
+ return nil, err
+ }
+
+ conn, err := c.broker.Dial(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.Client(c.broker, rpc.NewClient(conn))
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go
new file mode 100644
index 00000000..3984dc89
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go
@@ -0,0 +1,185 @@
+package plugin
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/rpc"
+ "sync"
+
+ "github.com/hashicorp/yamux"
+)
+
+// RPCServer listens for network connections and then dispenses interface
+// implementations over net/rpc.
+//
+// After the fields below are set, they shouldn't be read directly again,
+// since the server may be reading/writing them concurrently.
+type RPCServer struct {
+ Plugins map[string]Plugin
+
+ // Stdout, Stderr are what this server will use instead of the
+ // normal stdout/stderr. Due to the multi-process nature of our
+ // plugin system, we can't use the normal process values, so we
+ // make our own custom ones to pipe across.
+ Stdout io.Reader
+ Stderr io.Reader
+
+ // DoneCh should be set to a non-nil channel that will be closed
+ // when the client requests, via Control.Quit, that the RPC server end.
+ DoneCh chan<- struct{}
+
+ lock sync.Mutex
+}
+
+// Accept accepts connections on a listener and serves requests for
+// each incoming connection. Accept blocks; the caller typically invokes
+// it in a go statement.
+func (s *RPCServer) Accept(lis net.Listener) {
+ for {
+ conn, err := lis.Accept()
+ if err != nil {
+ log.Printf("[ERR] plugin: plugin server: %s", err)
+ return
+ }
+
+ go s.ServeConn(conn)
+ }
+}
+
+// ServeConn runs a single connection.
+//
+// ServeConn blocks, serving the connection until the client hangs up.
+func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) {
+ // First create the yamux server to wrap this connection
+ mux, err := yamux.Server(conn, nil)
+ if err != nil {
+ conn.Close()
+ log.Printf("[ERR] plugin: error creating yamux server: %s", err)
+ return
+ }
+
+ // Accept the control connection
+ control, err := mux.Accept()
+ if err != nil {
+ mux.Close()
+ if err != io.EOF {
+ log.Printf("[ERR] plugin: error accepting control connection: %s", err)
+ }
+
+ return
+ }
+
+ // Connect the stdstreams (out, err)
+ stdstream := make([]net.Conn, 2)
+ for i := range stdstream {
+ stdstream[i], err = mux.Accept()
+ if err != nil {
+ mux.Close()
+ log.Printf("[ERR] plugin: accepting stream %d: %s", i, err)
+ return
+ }
+ }
+
+ // Copy std streams out to the proper place
+ go copyStream("stdout", stdstream[0], s.Stdout)
+ go copyStream("stderr", stdstream[1], s.Stderr)
+
+ // Create the broker and start it up
+ broker := newMuxBroker(mux)
+ go broker.Run()
+
+ // Use the control connection to build the dispenser and serve the
+ // connection.
+ server := rpc.NewServer()
+ server.RegisterName("Control", &controlServer{
+ server: s,
+ })
+ server.RegisterName("Dispenser", &dispenseServer{
+ broker: broker,
+ plugins: s.Plugins,
+ })
+ server.ServeConn(control)
+}
+
+// done is called internally by the control server to close DoneCh,
+// which the main process listens on in order to exit cleanly.
+func (s *RPCServer) done() {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ if s.DoneCh != nil {
+ close(s.DoneCh)
+ s.DoneCh = nil
+ }
+}
+
+// controlServer serves the control interface that lets the client ask the
+// plugin server to shut down cleanly.
+type controlServer struct {
+ server *RPCServer
+}
+
+func (c *controlServer) Quit(
+ null bool, response *struct{}) error {
+ // End the server
+ c.server.done()
+
+ // Always set the (empty) response
+ *response = struct{}{}
+
+ return nil
+}
+
+// dispenseServer dispenses various interface implementations to the host.
+type dispenseServer struct {
+ broker *MuxBroker
+ plugins map[string]Plugin
+}
+
+func (d *dispenseServer) Dispense(
+ name string, response *uint32) error {
+ // Find the function to create this implementation
+ p, ok := d.plugins[name]
+ if !ok {
+ return fmt.Errorf("unknown plugin type: %s", name)
+ }
+
+ // Create the implementation first so we know if there is an error.
+ impl, err := p.Server(d.broker)
+ if err != nil {
+ // We turn the error into a plain error value so that it works across RPC
+ return errors.New(err.Error())
+ }
+
+ // Reserve an ID for our implementation
+ id := d.broker.NextId()
+ *response = id
+
+ // Run the rest in a goroutine since it can only happen once this RPC
+ // call returns. We wait for a connection for the plugin implementation
+ // and serve it.
+ go func() {
+ conn, err := d.broker.Accept(id)
+ if err != nil {
+ log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err)
+ return
+ }
+
+ serve(conn, "Plugin", impl)
+ }()
+
+ return nil
+}
+
+func serve(conn io.ReadWriteCloser, name string, v interface{}) {
+ server := rpc.NewServer()
+ if err := server.RegisterName(name, v); err != nil {
+ log.Printf("[ERR] go-plugin: plugin dispense error: %s", err)
+ return
+ }
+
+ server.ServeConn(conn)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go
new file mode 100644
index 00000000..782a4e11
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/server.go
@@ -0,0 +1,235 @@
+package plugin
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "os"
+ "os/signal"
+ "runtime"
+ "strconv"
+ "sync/atomic"
+)
+
+// CoreProtocolVersion is the ProtocolVersion of the plugin system itself.
+// We will increment this whenever we change any protocol behavior. This
+// will invalidate any prior plugins but will at least allow us to iterate
+// on the core in a safe way. We will do our best to do this very
+// infrequently.
+const CoreProtocolVersion = 1
+
+// HandshakeConfig is the configuration used by client and servers to
+// handshake before starting a plugin connection. This is embedded by
+// both ServeConfig and ClientConfig.
+//
+// In practice, the plugin host creates a HandshakeConfig that is exported
+// so that plugins can then easily consume it.
+type HandshakeConfig struct {
+ // ProtocolVersion is the version that clients must match on to
+ // agree they can communicate. This should match the ProtocolVersion
+ // set on ClientConfig when using a plugin.
+ ProtocolVersion uint
+
+ // MagicCookieKey and value are used as a very basic verification
+ // that a plugin is intended to be launched. This is not a security
+ // measure, just a UX feature. If the magic cookie doesn't match,
+ // we show human-friendly output.
+ MagicCookieKey string
+ MagicCookieValue string
+}
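+
+// For example, a host application and its plugins would typically share a
+// single exported value along these lines (illustrative values only):
+//
+//	var Handshake = plugin.HandshakeConfig{
+//		ProtocolVersion:  1,
+//		MagicCookieKey:   "MY_APP_PLUGIN", // hypothetical key
+//		MagicCookieValue: "hello",         // hypothetical value
+//	}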
+
+// ServeConfig configures what sorts of plugins are served.
+type ServeConfig struct {
+ // HandshakeConfig is the configuration that must match clients.
+ HandshakeConfig
+
+ // Plugins are the plugins that are served.
+ Plugins map[string]Plugin
+
+ // TLSProvider is a function that returns a configured tls.Config.
+ TLSProvider func() (*tls.Config, error)
+}
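+
+// A plugin's main function is then a sketch like the following, where
+// Handshake and pluginMap are hypothetical package-level values shared
+// with the host:
+//
+//	func main() {
+//		plugin.Serve(&plugin.ServeConfig{
+//			HandshakeConfig: Handshake,
+//			Plugins:         pluginMap,
+//		})
+//	}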
+
+// Serve serves the plugins given by ServeConfig.
+//
+// Serve doesn't return until the plugin is done being executed. Any
+// errors will be output to the log.
+//
+// This is the method that plugins should call in their main() functions.
+func Serve(opts *ServeConfig) {
+ // Validate the handshake config
+ if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" {
+ fmt.Fprintf(os.Stderr,
+ "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+
+ "key or value was set. Please notify the plugin author and report\n"+
+ "this as a bug.\n")
+ os.Exit(1)
+ }
+
+ // First check the cookie
+ if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue {
+ fmt.Fprintf(os.Stderr,
+ "This binary is a plugin. These are not meant to be executed directly.\n"+
+ "Please execute the program that consumes these plugins, which will\n"+
+ "load any plugins automatically\n")
+ os.Exit(1)
+ }
+
+ // Logging goes to the original stderr
+ log.SetOutput(os.Stderr)
+
+ // Create our new stdout, stderr files. These will override our built-in
+ // stdout/stderr so that it works across the stream boundary.
+ stdout_r, stdout_w, err := os.Pipe()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err)
+ os.Exit(1)
+ }
+ stderr_r, stderr_w, err := os.Pipe()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err)
+ os.Exit(1)
+ }
+
+ // Register a listener so we can accept a connection
+ listener, err := serverListener()
+ if err != nil {
+ log.Printf("[ERR] plugin: plugin init: %s", err)
+ return
+ }
+
+ if opts.TLSProvider != nil {
+ tlsConfig, err := opts.TLSProvider()
+ if err != nil {
+ log.Printf("[ERR] plugin: plugin tls init: %s", err)
+ return
+ }
+ listener = tls.NewListener(listener, tlsConfig)
+ }
+ defer listener.Close()
+
+ // Create the channel to tell us when we're done
+ doneCh := make(chan struct{})
+
+ // Create the RPC server to dispense
+ server := &RPCServer{
+ Plugins: opts.Plugins,
+ Stdout: stdout_r,
+ Stderr: stderr_r,
+ DoneCh: doneCh,
+ }
+
+ // Output the address and service name to stdout so that the host
+ // process can find and connect to it.
+ log.Printf("[DEBUG] plugin: plugin address: %s %s\n",
+ listener.Addr().Network(), listener.Addr().String())
+ fmt.Printf("%d|%d|%s|%s\n",
+ CoreProtocolVersion,
+ opts.ProtocolVersion,
+ listener.Addr().Network(),
+ listener.Addr().String())
+ os.Stdout.Sync()
+
+ // Eat the interrupts
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, os.Interrupt)
+ go func() {
+ var count int32 = 0
+ for {
+ <-ch
+ newCount := atomic.AddInt32(&count, 1)
+ log.Printf(
+ "[DEBUG] plugin: received interrupt signal (count: %d). Ignoring.",
+ newCount)
+ }
+ }()
+
+ // Set our new out, err
+ os.Stdout = stdout_w
+ os.Stderr = stderr_w
+
+ // Serve
+ go server.Accept(listener)
+
+ // Wait for the graceful exit
+ <-doneCh
+}
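
Putting the pieces above together, a plugin binary's main() is, roughly, a single call to Serve. A minimal sketch, where GreeterPlugin is a hypothetical type implementing the Plugin interface:

```go
package main

import "github.com/hashicorp/go-plugin"

func main() {
	// Serve blocks until the host is done with the plugin; interrupts
	// are swallowed by Serve itself, as shown above.
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,                // hypothetical
			MagicCookieKey:   "EXAMPLE_PLUGIN", // hypothetical
			MagicCookieValue: "hello",          // hypothetical
		},
		Plugins: map[string]plugin.Plugin{
			"greeter": &GreeterPlugin{}, // hypothetical Plugin implementation
		},
	})
}
```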
+
+func serverListener() (net.Listener, error) {
+ if runtime.GOOS == "windows" {
+ return serverListener_tcp()
+ }
+
+ return serverListener_unix()
+}
+
+func serverListener_tcp() (net.Listener, error) {
+ minPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MIN_PORT"), 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ maxPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MAX_PORT"), 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ for port := minPort; port <= maxPort; port++ {
+ address := fmt.Sprintf("127.0.0.1:%d", port)
+ listener, err := net.Listen("tcp", address)
+ if err == nil {
+ return listener, nil
+ }
+ }
+
+ return nil, errors.New("Couldn't bind plugin TCP listener")
+}
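
The PLUGIN_MIN_PORT and PLUGIN_MAX_PORT variables consumed above are set by the host process before it launches the plugin binary; a minimal sketch of that side, with a hypothetical binary path (in practice go-plugin's client side does this for you):

```go
package main

import (
	"os"
	"os/exec"
)

// launchPlugin sketches how a host would constrain the TCP port range
// that serverListener_tcp scans. Path and bounds are hypothetical.
func launchPlugin() *exec.Cmd {
	cmd := exec.Command("./my-plugin") // hypothetical plugin binary
	cmd.Env = append(os.Environ(),
		"PLUGIN_MIN_PORT=10000", // lower bound of the scan above
		"PLUGIN_MAX_PORT=25000", // upper bound of the scan above
	)
	return cmd
}
```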
+
+func serverListener_unix() (net.Listener, error) {
+ tf, err := ioutil.TempFile("", "plugin")
+ if err != nil {
+ return nil, err
+ }
+ path := tf.Name()
+
+ // Close the file and remove it because the path must not exist
+ // when the Unix domain socket is created.
+ if err := tf.Close(); err != nil {
+ return nil, err
+ }
+ if err := os.Remove(path); err != nil {
+ return nil, err
+ }
+
+ l, err := net.Listen("unix", path)
+ if err != nil {
+ return nil, err
+ }
+
+ // Wrap the listener in rmListener so that the Unix domain socket file
+ // is removed on close.
+ return &rmListener{
+ Listener: l,
+ Path: path,
+ }, nil
+}
+
+// rmListener is an implementation of net.Listener that forwards most
+// calls to the listener but also removes a file as part of the close. We
+// use this to clean up the unix domain socket on close.
+type rmListener struct {
+ net.Listener
+ Path string
+}
+
+func (l *rmListener) Close() error {
+ // Close the listener itself
+ if err := l.Listener.Close(); err != nil {
+ return err
+ }
+
+ // Remove the file
+ return os.Remove(l.Path)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go
new file mode 100644
index 00000000..033079ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/server_mux.go
@@ -0,0 +1,31 @@
+package plugin
+
+import (
+ "fmt"
+ "os"
+)
+
+// ServeMuxMap is the type that is used to configure ServeMux
+type ServeMuxMap map[string]*ServeConfig
+
+// ServeMux is like Serve, but serves multiple types of plugins determined
+// by the argument given on the command-line.
+//
+// ServeMux doesn't return until the plugin is done being executed. Any
+// errors are logged or output to stderr.
+func ServeMux(m ServeMuxMap) {
+ if len(os.Args) != 2 {
+ fmt.Fprintf(os.Stderr,
+ "Invoked improperly. This is an internal command that shouldn't\n"+
+ "be manually invoked.\n")
+ os.Exit(1)
+ }
+
+ opts, ok := m[os.Args[1]]
+ if !ok {
+ fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1])
+ os.Exit(1)
+ }
+
+ Serve(opts)
+}
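
A sketch of a multi-plugin binary built on ServeMux; the plugin names and (elided) configs are hypothetical:

```go
package main

import "github.com/hashicorp/go-plugin"

func main() {
	// The host invokes this binary with one argument selecting the
	// ServeConfig to serve, e.g. "./multi-plugin greeter".
	plugin.ServeMux(plugin.ServeMuxMap{
		"greeter": &plugin.ServeConfig{ /* hypothetical config */ },
		"counter": &plugin.ServeConfig{ /* hypothetical config */ },
	})
}
```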
diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go
new file mode 100644
index 00000000..1d547aaa
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/stream.go
@@ -0,0 +1,18 @@
+package plugin
+
+import (
+ "io"
+ "log"
+)
+
+func copyStream(name string, dst io.Writer, src io.Reader) {
+ if src == nil {
+ panic(name + ": src is nil")
+ }
+ if dst == nil {
+ panic(name + ": dst is nil")
+ }
+ if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
+ log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go
new file mode 100644
index 00000000..9086a1b4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/testing.go
@@ -0,0 +1,76 @@
+package plugin
+
+import (
+ "bytes"
+ "net"
+ "net/rpc"
+ "testing"
+)
+
+// This file contains test helpers that you can use outside of
+// this package to make it easier to test plugins themselves.
+
+// TestConn is a helper function for returning a client and server
+// net.Conn connected to each other.
+func TestConn(t *testing.T) (net.Conn, net.Conn) {
+ // Listen on any local port. This listener will be closed
+ // after a single connection is established.
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Start a goroutine to accept our client connection
+ var serverConn net.Conn
+ doneCh := make(chan struct{})
+ go func() {
+ defer close(doneCh)
+ defer l.Close()
+ var err error
+ serverConn, err = l.Accept()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ }()
+
+ // Connect to the server
+ clientConn, err := net.Dial("tcp", l.Addr().String())
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Wait for the server side to acknowledge it has connected
+ <-doneCh
+
+ return clientConn, serverConn
+}
+
+// TestRPCConn returns a rpc client and server connected to each other.
+func TestRPCConn(t *testing.T) (*rpc.Client, *rpc.Server) {
+ clientConn, serverConn := TestConn(t)
+
+ server := rpc.NewServer()
+ go server.ServeConn(serverConn)
+
+ client := rpc.NewClient(clientConn)
+ return client, server
+}
+
+// TestPluginRPCConn returns a plugin RPC client and server that are connected
+// together and configured.
+func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) {
+ // Create two net.Conns we can use to shuttle our control connection
+ clientConn, serverConn := TestConn(t)
+
+ // Start up the server
+ server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)}
+ go server.ServeConn(serverConn)
+
+ // Connect the client to the server
+ client, err := NewRPCClient(clientConn, ps)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ return client, server
+}
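
As an illustration of these helpers, a plugin package's own tests could wire a client and server together like this; the "greeter" name and its GreeterPlugin implementation are hypothetical:

```go
package plugin

import "testing"

func TestGreeterDispense(t *testing.T) {
	// TestPluginRPCConn gives us a connected, configured pair.
	client, _ := TestPluginRPCConn(t, map[string]Plugin{
		"greeter": &GreeterPlugin{}, // hypothetical Plugin implementation
	})
	defer client.Close()

	raw, err := client.Dispense("greeter")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	_ = raw // assert on the dispensed interface here
}
```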
diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE
new file mode 100644
index 00000000..e87a115e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md
new file mode 100644
index 00000000..02565c8c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/README.md
@@ -0,0 +1,8 @@
+# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid)
+
+Generates UUID-format strings using high quality, purely random bytes. It can also parse UUID-format strings into their component bytes.
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid).
diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go
new file mode 100644
index 00000000..ff9364c4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/uuid.go
@@ -0,0 +1,65 @@
+package uuid
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "fmt"
+)
+
+// GenerateRandomBytes is used to generate random bytes of the given size.
+func GenerateRandomBytes(size int) ([]byte, error) {
+ buf := make([]byte, size)
+ if _, err := rand.Read(buf); err != nil {
+ return nil, fmt.Errorf("failed to read random bytes: %v", err)
+ }
+ return buf, nil
+}
+
+// GenerateUUID is used to generate a random UUID
+func GenerateUUID() (string, error) {
+ buf, err := GenerateRandomBytes(16)
+ if err != nil {
+ return "", err
+ }
+ return FormatUUID(buf)
+}
+
+// FormatUUID converts a 16-byte slice into the canonical
+// 8-4-4-4-12 UUID string representation.
+func FormatUUID(buf []byte) (string, error) {
+ if len(buf) != 16 {
+ return "", fmt.Errorf("wrong length byte slice (%d)", len(buf))
+ }
+
+ return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
+ buf[0:4],
+ buf[4:6],
+ buf[6:8],
+ buf[8:10],
+ buf[10:16]), nil
+}
+
+// ParseUUID parses a canonical UUID string back into the 16 bytes
+// it encodes.
+func ParseUUID(uuid string) ([]byte, error) {
+ if len(uuid) != 36 {
+ return nil, fmt.Errorf("uuid string is wrong length")
+ }
+
+ hyph := []byte("-")
+
+ if uuid[8] != hyph[0] ||
+ uuid[13] != hyph[0] ||
+ uuid[18] != hyph[0] ||
+ uuid[23] != hyph[0] {
+ return nil, fmt.Errorf("uuid is improperly formatted")
+ }
+
+ hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36]
+
+ ret, err := hex.DecodeString(hexStr)
+ if err != nil {
+ return nil, err
+ }
+ if len(ret) != 16 {
+ return nil, fmt.Errorf("decoded hex is the wrong length")
+ }
+
+ return ret, nil
+}
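
A quick sketch of the generate/parse round trip these three functions provide:

```go
package main

import (
	"fmt"

	uuid "github.com/hashicorp/go-uuid"
)

func main() {
	id, err := uuid.GenerateUUID() // 16 random bytes, formatted 8-4-4-4-12
	if err != nil {
		panic(err)
	}
	buf, err := uuid.ParseUUID(id) // back to the raw 16 bytes
	if err != nil {
		panic(err)
	}
	fmt.Println(id, len(buf)) // prints the UUID string and 16
}
```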
diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE
new file mode 100644
index 00000000..c33dcc7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md
new file mode 100644
index 00000000..6f3a15ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/README.md
@@ -0,0 +1,65 @@
+# Versioning Library for Go
+[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version)
+
+go-version is a library for parsing versions and version constraints,
+and verifying versions against a set of constraints. go-version
+can sort a collection of versions properly, handle prerelease/beta
+versions, increment versions, etc.
+
+Versions used with go-version must follow [SemVer](http://semver.org/).
+
+## Installation and Usage
+
+Package documentation can be found on
+[GoDoc](http://godoc.org/github.com/hashicorp/go-version).
+
+Installation can be done with a normal `go get`:
+
+```
+$ go get github.com/hashicorp/go-version
+```
+
+#### Version Parsing and Comparison
+
+```go
+v1, err := version.NewVersion("1.2")
+v2, err := version.NewVersion("1.5+metadata")
+
+// Comparison example. There is also GreaterThan, Equal, and just
+// a simple Compare that returns an int allowing easy >=, <=, etc.
+if v1.LessThan(v2) {
+ fmt.Printf("%s is less than %s", v1, v2)
+}
+```
+
+#### Version Constraints
+
+```go
+v1, err := version.NewVersion("1.2")
+
+// Constraints example.
+constraints, err := version.NewConstraint(">= 1.0, < 1.4")
+if constraints.Check(v1) {
+ fmt.Printf("%s satisfies constraints %s", v1, constraints)
+}
+```
+
+#### Version Sorting
+
+```go
+versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"}
+versions := make([]*version.Version, len(versionsRaw))
+for i, raw := range versionsRaw {
+ v, _ := version.NewVersion(raw)
+ versions[i] = v
+}
+
+// After this, the versions are properly sorted
+sort.Sort(version.Collection(versions))
+```
+
+## Issues and Contributing
+
+If you find an issue with this library, please report it. We also welcome
+contributions: fork this library and submit a pull request.
diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go
new file mode 100644
index 00000000..8c73df06
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/constraint.go
@@ -0,0 +1,178 @@
+package version
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// Constraint represents a single constraint for a version, such as
+// ">= 1.0".
+type Constraint struct {
+ f constraintFunc
+ check *Version
+ original string
+}
+
+// Constraints is a slice of constraints. We make a custom type so that
+// we can add methods to it.
+type Constraints []*Constraint
+
+type constraintFunc func(v, c *Version) bool
+
+var constraintOperators map[string]constraintFunc
+
+var constraintRegexp *regexp.Regexp
+
+func init() {
+ constraintOperators = map[string]constraintFunc{
+ "": constraintEqual,
+ "=": constraintEqual,
+ "!=": constraintNotEqual,
+ ">": constraintGreaterThan,
+ "<": constraintLessThan,
+ ">=": constraintGreaterThanEqual,
+ "<=": constraintLessThanEqual,
+ "~>": constraintPessimistic,
+ }
+
+ ops := make([]string, 0, len(constraintOperators))
+ for k := range constraintOperators {
+ ops = append(ops, regexp.QuoteMeta(k))
+ }
+
+ constraintRegexp = regexp.MustCompile(fmt.Sprintf(
+ `^\s*(%s)\s*(%s)\s*$`,
+ strings.Join(ops, "|"),
+ VersionRegexpRaw))
+}
+
+// NewConstraint will parse one or more constraints from the given
+// constraint string. The string must be a comma-separated list of
+// constraints.
+func NewConstraint(v string) (Constraints, error) {
+ vs := strings.Split(v, ",")
+ result := make([]*Constraint, len(vs))
+ for i, single := range vs {
+ c, err := parseSingle(single)
+ if err != nil {
+ return nil, err
+ }
+
+ result[i] = c
+ }
+
+ return Constraints(result), nil
+}
+
+// Check tests if a version satisfies all the constraints.
+func (cs Constraints) Check(v *Version) bool {
+ for _, c := range cs {
+ if !c.Check(v) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// String returns the string format of the constraints
+func (cs Constraints) String() string {
+ csStr := make([]string, len(cs))
+ for i, c := range cs {
+ csStr[i] = c.String()
+ }
+
+ return strings.Join(csStr, ",")
+}
+
+// Check tests if a constraint is validated by the given version.
+func (c *Constraint) Check(v *Version) bool {
+ return c.f(v, c.check)
+}
+
+func (c *Constraint) String() string {
+ return c.original
+}
+
+func parseSingle(v string) (*Constraint, error) {
+ matches := constraintRegexp.FindStringSubmatch(v)
+ if matches == nil {
+ return nil, fmt.Errorf("Malformed constraint: %s", v)
+ }
+
+ check, err := NewVersion(matches[2])
+ if err != nil {
+ return nil, err
+ }
+
+ return &Constraint{
+ f: constraintOperators[matches[1]],
+ check: check,
+ original: v,
+ }, nil
+}
+
+//-------------------------------------------------------------------
+// Constraint functions
+//-------------------------------------------------------------------
+
+func constraintEqual(v, c *Version) bool {
+ return v.Equal(c)
+}
+
+func constraintNotEqual(v, c *Version) bool {
+ return !v.Equal(c)
+}
+
+func constraintGreaterThan(v, c *Version) bool {
+ return v.Compare(c) == 1
+}
+
+func constraintLessThan(v, c *Version) bool {
+ return v.Compare(c) == -1
+}
+
+func constraintGreaterThanEqual(v, c *Version) bool {
+ return v.Compare(c) >= 0
+}
+
+func constraintLessThanEqual(v, c *Version) bool {
+ return v.Compare(c) <= 0
+}
+
+func constraintPessimistic(v, c *Version) bool {
+ // If the version being checked is naturally less than the constraint, then there
+ // is no way for the version to be valid against the constraint
+ if v.LessThan(c) {
+ return false
+ }
+ // We'll use this more than once, so grab the length now so it's a little cleaner
+ // to write the later checks
+ cs := len(c.segments)
+
+ // If the version being checked has less specificity than the constraint, then there
+ // is no way for the version to be valid against the constraint
+ if cs > len(v.segments) {
+ return false
+ }
+
+ // Check the segments in the constraint against those in the version. If the version
+ // being checked, at any point, does not have the same values in each index of the
+ // constraints segments, then it cannot be valid against the constraint.
+ for i := 0; i < c.si-1; i++ {
+ if v.segments[i] != c.segments[i] {
+ return false
+ }
+ }
+
+ // Check the last segment of the constraint. If the version's segment at
+ // this index is less than the constraint's segment at this index, then it
+ // cannot be valid against the constraint
+ if c.segments[cs-1] > v.segments[cs-1] {
+ return false
+ }
+
+ // If nothing has rejected the version by now, it's valid
+ return true
+}
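
To make the pessimistic ("~>") logic above concrete, a minimal sketch illustrating the behavior this code implements for a hypothetical constraint (outputs follow from the checks above):

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	// "~> 1.2.3" pins the major and minor segments and requires
	// the patch segment to be at least 3.
	c, _ := version.NewConstraint("~> 1.2.3")
	for _, s := range []string{"1.2.2", "1.2.9", "1.3.0"} {
		v, _ := version.NewVersion(s)
		fmt.Println(s, c.Check(v)) // 1.2.2 false, 1.2.9 true, 1.3.0 false
	}
}
```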
diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go
new file mode 100644
index 00000000..dfe509ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/version.go
@@ -0,0 +1,322 @@
+package version
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// The compiled regular expression used to test the validity of a version.
+var versionRegexp *regexp.Regexp
+
+// The raw regular expression string used for testing the validity
+// of a version.
+const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
+ `(-?([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+ `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+ `?`
+
+// Version represents a single version.
+type Version struct {
+ metadata string
+ pre string
+ segments []int64
+ si int
+}
+
+func init() {
+ versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
+}
+
+// NewVersion parses the given version and returns a new
+// Version.
+func NewVersion(v string) (*Version, error) {
+ matches := versionRegexp.FindStringSubmatch(v)
+ if matches == nil {
+ return nil, fmt.Errorf("Malformed version: %s", v)
+ }
+ segmentsStr := strings.Split(matches[1], ".")
+ segments := make([]int64, len(segmentsStr))
+ si := 0
+ for i, str := range segmentsStr {
+ val, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing version: %s", err)
+ }
+
+ segments[i] = val
+ si++
+ }
+
+ // Even though we could support more than three segments, if we
+ // got fewer than three, pad it with 0s. This is to cover the basic
+ // default use case of semver, which is MAJOR.MINOR.PATCH at the minimum
+ for i := len(segments); i < 3; i++ {
+ segments = append(segments, 0)
+ }
+
+ return &Version{
+ metadata: matches[7],
+ pre: matches[4],
+ segments: segments,
+ si: si,
+ }, nil
+}
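
A sketch of what the parsing above yields for a short version carrying prerelease and metadata parts:

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	v, err := version.NewVersion("1.2-beta+abc")
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Segments())   // [1 2 0] -- padded to MAJOR.MINOR.PATCH
	fmt.Println(v.Prerelease()) // beta
	fmt.Println(v.Metadata())   // abc
	fmt.Println(v)              // 1.2.0-beta+abc
}
```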
+
+// Must is a helper that wraps a call to a function returning (*Version, error)
+// and panics if error is non-nil.
+func Must(v *Version, err error) *Version {
+ if err != nil {
+ panic(err)
+ }
+
+ return v
+}
+
+// Compare compares this version to another version. This
+// returns -1, 0, or 1 if this version is smaller, equal,
+// or larger than the other version, respectively.
+//
+// If you want boolean results, use the LessThan, Equal,
+// or GreaterThan methods.
+func (v *Version) Compare(other *Version) int {
+ // A quick, efficient equality check
+ if v.String() == other.String() {
+ return 0
+ }
+
+ segmentsSelf := v.Segments64()
+ segmentsOther := other.Segments64()
+
+ // If the segments are the same, we must compare on prerelease info
+ if reflect.DeepEqual(segmentsSelf, segmentsOther) {
+ preSelf := v.Prerelease()
+ preOther := other.Prerelease()
+ if preSelf == "" && preOther == "" {
+ return 0
+ }
+ if preSelf == "" {
+ return 1
+ }
+ if preOther == "" {
+ return -1
+ }
+
+ return comparePrereleases(preSelf, preOther)
+ }
+
+ // Get the highest specificity (hS), or if they're equal, just use segmentSelf length
+ lenSelf := len(segmentsSelf)
+ lenOther := len(segmentsOther)
+ hS := lenSelf
+ if lenSelf < lenOther {
+ hS = lenOther
+ }
+ // Compare the segments
+ // Because a constraint could have more/less specificity than the version it's
+ // checking, we need to account for a lopsided or jagged comparison
+ for i := 0; i < hS; i++ {
+ if i > lenSelf-1 {
+ // This means Self had the lower specificity
+ // Check to see if the remaining segments in Other are all zeros
+ if !allZero(segmentsOther[i:]) {
+ // if not, it means that Other has to be greater than Self
+ return -1
+ }
+ break
+ } else if i > lenOther-1 {
+ // this means Other had the lower specificity
+ // Check to see if the remaining segments in Self are all zeros;
+ if !allZero(segmentsSelf[i:]) {
+ // if not, it means that Self has to be greater than Other
+ return 1
+ }
+ break
+ }
+ lhs := segmentsSelf[i]
+ rhs := segmentsOther[i]
+ if lhs == rhs {
+ continue
+ } else if lhs < rhs {
+ return -1
+ }
+ // Otherwise lhs was > rhs; they're not equal
+ return 1
+ }
+
+ // if we got this far, they're equal
+ return 0
+}
+
+func allZero(segs []int64) bool {
+ for _, s := range segs {
+ if s != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func comparePart(preSelf string, preOther string) int {
+ if preSelf == preOther {
+ return 0
+ }
+
+ selfNumeric := true
+ _, err := strconv.ParseInt(preSelf, 10, 64)
+ if err != nil {
+ selfNumeric = false
+ }
+
+ otherNumeric := true
+ _, err = strconv.ParseInt(preOther, 10, 64)
+ if err != nil {
+ otherNumeric = false
+ }
+
+ // if a part is empty, we use the other to decide
+ if preSelf == "" {
+ if otherNumeric {
+ return -1
+ }
+ return 1
+ }
+
+ if preOther == "" {
+ if selfNumeric {
+ return 1
+ }
+ return -1
+ }
+
+ if selfNumeric && !otherNumeric {
+ return -1
+ } else if !selfNumeric && otherNumeric {
+ return 1
+ } else if preSelf > preOther {
+ return 1
+ }
+
+ return -1
+}
+
+func comparePrereleases(v string, other string) int {
+ // the same pre release!
+ if v == other {
+ return 0
+ }
+
+ // split both prereleases to analyze their parts
+ selfPreReleaseMeta := strings.Split(v, ".")
+ otherPreReleaseMeta := strings.Split(other, ".")
+
+ selfPreReleaseLen := len(selfPreReleaseMeta)
+ otherPreReleaseLen := len(otherPreReleaseMeta)
+
+ biggestLen := otherPreReleaseLen
+ if selfPreReleaseLen > otherPreReleaseLen {
+ biggestLen = selfPreReleaseLen
+ }
+
+ // loop over the parts to find the first difference
+ for i := 0; i < biggestLen; i++ {
+ partSelfPre := ""
+ if i < selfPreReleaseLen {
+ partSelfPre = selfPreReleaseMeta[i]
+ }
+
+ partOtherPre := ""
+ if i < otherPreReleaseLen {
+ partOtherPre = otherPreReleaseMeta[i]
+ }
+
+ compare := comparePart(partSelfPre, partOtherPre)
+ // if the parts are equal, continue the loop
+ if compare != 0 {
+ return compare
+ }
+ }
+
+ return 0
+}
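
The net effect of the prerelease handling in Compare and the helpers above, sketched:

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	a, _ := version.NewVersion("1.2.3-beta")
	b, _ := version.NewVersion("1.2.3")
	// With equal numeric segments, a release outranks any prerelease,
	// per the empty-string checks at the top of Compare.
	fmt.Println(a.LessThan(b)) // true
	fmt.Println(a.Compare(b))  // -1
}
```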
+
+// Equal tests if two versions are equal.
+func (v *Version) Equal(o *Version) bool {
+ return v.Compare(o) == 0
+}
+
+// GreaterThan tests if this version is greater than another version.
+func (v *Version) GreaterThan(o *Version) bool {
+ return v.Compare(o) > 0
+}
+
+// LessThan tests if this version is less than another version.
+func (v *Version) LessThan(o *Version) bool {
+ return v.Compare(o) < 0
+}
+
+// Metadata returns any metadata that was part of the version
+// string.
+//
+// Metadata is anything that comes after the "+" in the version.
+// For example, with "1.2.3+beta", the metadata is "beta".
+func (v *Version) Metadata() string {
+ return v.metadata
+}
+
+// Prerelease returns any prerelease data that is part of the version,
+// or blank if there is no prerelease data.
+//
+// Prerelease information is anything that comes after the "-" in the
+// version (but before any metadata). For example, with "1.2.3-beta",
+// the prerelease information is "beta".
+func (v *Version) Prerelease() string {
+ return v.pre
+}
+
+// Segments returns the numeric segments of the version as a slice of ints.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments() []int {
+ segmentSlice := make([]int, len(v.segments))
+ for i, v := range v.segments {
+ segmentSlice[i] = int(v)
+ }
+ return segmentSlice
+}
+
+// Segments64 returns the numeric segments of the version as a slice of int64s.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments64() []int64 {
+ return v.segments
+}
+
+// String returns the full version string including pre-release
+// and metadata information.
+func (v *Version) String() string {
+ var buf bytes.Buffer
+ fmtParts := make([]string, len(v.segments))
+ for i, s := range v.segments {
+ // FormatInt cannot fail here since we've pre-parsed the values in segments
+ str := strconv.FormatInt(s, 10)
+ fmtParts[i] = str
+ }
+ fmt.Fprint(&buf, strings.Join(fmtParts, "."))
+ if v.pre != "" {
+ fmt.Fprintf(&buf, "-%s", v.pre)
+ }
+ if v.metadata != "" {
+ fmt.Fprintf(&buf, "+%s", v.metadata)
+ }
+
+ return buf.String()
+}
diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go
new file mode 100644
index 00000000..cc888d43
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/version_collection.go
@@ -0,0 +1,17 @@
+package version
+
+// Collection is a type that implements the sort.Interface interface
+// so that versions can be sorted.
+type Collection []*Version
+
+func (v Collection) Len() int {
+ return len(v)
+}
+
+func (v Collection) Less(i, j int) bool {
+ return v[i].LessThan(v[j])
+}
+
+func (v Collection) Swap(i, j int) {
+ v[i], v[j] = v[j], v[i]
+}
diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE
new file mode 100644
index 00000000..c33dcc7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md
new file mode 100644
index 00000000..c8223326
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/README.md
@@ -0,0 +1,125 @@
+# HCL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
+
+HCL (HashiCorp Configuration Language) is a configuration language built
+by HashiCorp. The goal of HCL is to build a structured configuration language
+that is both human and machine friendly for use with command-line tools, but
+specifically targeted towards DevOps tools, servers, etc.
+
+HCL is also fully JSON compatible. That is, JSON can be used as completely
+valid input to a system expecting HCL. This helps make systems
+interoperable with other systems.
+
+HCL is heavily inspired by
+[libucl](https://github.com/vstakhov/libucl),
+nginx configuration, and other similar languages.
+
+## Why?
+
+A common question about HCL is: why not JSON, YAML, etc.?
+
+Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
+used a variety of configuration languages from full programming languages
+such as Ruby to complete data structure languages such as JSON. What we
+learned is that some people wanted human-friendly configuration languages
+and some people wanted machine-friendly languages.
+
+JSON fits a nice balance in this, but is fairly verbose and most
+importantly doesn't support comments. With YAML, we found that beginners
+had a really hard time determining what the actual structure was, and
+ended up guessing more often than not whether to use a hyphen, colon, etc.
+in order to represent some configuration key.
+
+Full programming languages such as Ruby enable complex behavior
+a configuration language shouldn't usually allow, and they also force
+people to learn some subset of Ruby.
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+ * Single line comments start with `#` or `//`
+
+ * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+ are not allowed. A multi-line comment (also known as a block comment)
+ terminates at the first `*/` found.
+
+ * Values are assigned with the syntax `key = value` (whitespace doesn't
+ matter). The value can be a primitive (string, number, or boolean), an
+ object, or a list.
+
+ * Strings are double-quoted and can contain any UTF-8 characters.
+ Example: `"Hello, World"`
+
+ * Multi-line strings start with `<<EOF` at the end of a line, and end
+ with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)).
+ Any text may be used in place of `EOF`. Example:
+```
+<<FOO
+hello
+world
+FOO
+```
+
+ * Numbers are assumed to be base 10. If you prefix a number with `0x`,
+ it is treated as hexadecimal. If it is prefixed with `0`, it is
+ treated as octal. Numbers can be in scientific notation: `1e10`.
+
+ * Boolean values: `true`, `false`
+
+ * Arrays can be made by wrapping values in `[]`. Example:
+ `["foo", "bar", 42]`. Arrays can contain primitives,
+ other arrays, and objects. As an alternative, lists
+ of objects can be created with repeated blocks, using
+ this structure:
+
+ ```hcl
+ service {
+ key = "value"
+ }
+
+ service {
+ key = "value"
+ }
+ ```
+
+Objects and nested objects are created using the structure shown below:
+
+```
+variable "ami" {
+ description = "the AMI to use"
+}
+```
+This would be equivalent to the following JSON:
+``` json
+{
+ "variable": {
+ "ami": {
+ "description": "the AMI to use"
+ }
+ }
+}
+```
+
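+As an illustrative sketch (not part of the upstream README), the repeated
+`service` blocks shown above can be decoded into a Go struct with the
+`hcl.Decode` function vendored in this same commit:
+
+``` go
+package main
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hcl"
+)
+
+type Service struct {
+ Key string `hcl:"key"`
+}
+
+type Config struct {
+ Services []Service `hcl:"service"`
+}
+
+func main() {
+ input := `
+service {
+ key = "a"
+}
+
+service {
+ key = "b"
+}
+`
+ var c Config
+ if err := hcl.Decode(&c, input); err != nil {
+ panic(err)
+ }
+ fmt.Printf("%+v\n", c.Services) // prints [{Key:a} {Key:b}]
+}
+```
+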
+## Thanks
+
+Thanks to:
+
+ * [@vstakhov](https://github.com/vstakhov) - The original libucl parser
+ and syntax that HCL was based on.
+
+ * [@fatih](https://github.com/fatih) - The rewritten HCL parser
+ in pure Go (no goyacc) and support for a printer.
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644
index 00000000..0b39c1b9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -0,0 +1,724 @@
+package hcl
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/parser"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// tagName is the struct tag used to configure HCL decoding
+const tagName = "hcl"
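+
+// (Editorial note, not from the upstream file: struct fields are matched to
+// HCL keys via this tag, for example
+//
+// Description string `hcl:"description"` // decode the "description" key
+// Ignored string `hcl:"-"` // skip this field
+// Name string `hcl:",key"` // receive the enclosing block's key
+//
+// Options such as ",squash" for embedded structs are handled in decodeStruct
+// below.)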
+
+var (
+ // nodeType holds a reference to the type of ast.Node
+ nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+ root, err := parse(bs)
+ if err != nil {
+ return err
+ }
+
+ return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+ obj, err := Parse(in)
+ if err != nil {
+ return err
+ }
+
+ return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
+func DecodeObject(out interface{}, n ast.Node) error {
+ val := reflect.ValueOf(out)
+ if val.Kind() != reflect.Ptr {
+ return errors.New("result must be a pointer")
+ }
+
+ // If we have the file, we really decode the root node
+ if f, ok := n.(*ast.File); ok {
+ n = f.Node
+ }
+
+ var d decoder
+ return d.decode("root", n, val.Elem())
+}
+
+type decoder struct {
+ stack []reflect.Kind
+}
+
+func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
+ k := result
+
+ // If we have an interface with a valid value, we use that
+ // for the check.
+ if result.Kind() == reflect.Interface {
+ elem := result.Elem()
+ if elem.IsValid() {
+ k = elem
+ }
+ }
+
+ // Push current onto stack unless it is an interface.
+ if k.Kind() != reflect.Interface {
+ d.stack = append(d.stack, k.Kind())
+
+ // Schedule a pop
+ defer func() {
+ d.stack = d.stack[:len(d.stack)-1]
+ }()
+ }
+
+ switch k.Kind() {
+ case reflect.Bool:
+ return d.decodeBool(name, node, result)
+ case reflect.Float64:
+ return d.decodeFloat(name, node, result)
+ case reflect.Int, reflect.Int32, reflect.Int64:
+ return d.decodeInt(name, node, result)
+ case reflect.Interface:
+ // When we see an interface, we make our own thing
+ return d.decodeInterface(name, node, result)
+ case reflect.Map:
+ return d.decodeMap(name, node, result)
+ case reflect.Ptr:
+ return d.decodePtr(name, node, result)
+ case reflect.Slice:
+ return d.decodeSlice(name, node, result)
+ case reflect.String:
+ return d.decodeString(name, node, result)
+ case reflect.Struct:
+ return d.decodeStruct(name, node, result)
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
+ }
+ }
+}
+
+func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ if n.Token.Type == token.BOOL {
+ v, err := strconv.ParseBool(n.Token.Text)
+ if err != nil {
+ return err
+ }
+
+ result.Set(reflect.ValueOf(v))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ if n.Token.Type == token.FLOAT {
+ v, err := strconv.ParseFloat(n.Token.Text, 64)
+ if err != nil {
+ return err
+ }
+
+ result.Set(reflect.ValueOf(v))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.NUMBER:
+ v, err := strconv.ParseInt(n.Token.Text, 0, 0)
+ if err != nil {
+ return err
+ }
+
+ if result.Kind() == reflect.Interface {
+ result.Set(reflect.ValueOf(int(v)))
+ } else {
+ result.SetInt(v)
+ }
+ return nil
+ case token.STRING:
+ v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
+ if err != nil {
+ return err
+ }
+
+ if result.Kind() == reflect.Interface {
+ result.Set(reflect.ValueOf(int(v)))
+ } else {
+ result.SetInt(v)
+ }
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
+ // When we see an ast.Node, we retain the value to enable deferred decoding.
+ // Very useful in situations where we want to preserve ast.Node information
+ // like Pos
+ if result.Type() == nodeType && result.CanSet() {
+ result.Set(reflect.ValueOf(node))
+ return nil
+ }
+
+ var set reflect.Value
+ redecode := true
+
+ // For testing types, ObjectType should just be treated as a list. We
+ // set this to a temporary var because we want to pass in the real node.
+ testNode := node
+ if ot, ok := node.(*ast.ObjectType); ok {
+ testNode = ot.List
+ }
+
+ switch n := testNode.(type) {
+ case *ast.ObjectList:
+ // If we're at the root or we're directly within a slice, then we
+ // decode objects into map[string]interface{}, otherwise we decode
+ // them into lists.
+ if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+ var temp map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeMap(
+ reflect.MapOf(
+ reflect.TypeOf(""),
+ tempVal.Type().Elem()))
+
+ set = result
+ } else {
+ var temp []map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
+ set = result
+ }
+ case *ast.ObjectType:
+ // If we're at the root or we're directly within a slice, then we
+ // decode objects into map[string]interface{}, otherwise we decode
+ // them into lists.
+ if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+ var temp map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeMap(
+ reflect.MapOf(
+ reflect.TypeOf(""),
+ tempVal.Type().Elem()))
+
+ set = result
+ } else {
+ var temp []map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
+ set = result
+ }
+ case *ast.ListType:
+ var temp []interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
+ set = result
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.BOOL:
+ var result bool
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.FLOAT:
+ var result float64
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.NUMBER:
+ var result int
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.STRING, token.HEREDOC:
+ set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
+ }
+ }
+ default:
+ return fmt.Errorf(
+ "%s: cannot decode into interface: %T",
+ name, node)
+ }
+
+ // Set the result to what it's supposed to be, then reset
+ // result so we don't reflect into this method anymore.
+ result.Set(set)
+
+ if redecode {
+ // Revisit the node so that we can use the newly instantiated
+ // thing and populate it.
+ if err := d.decode(name, node, result); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
+ if item, ok := node.(*ast.ObjectItem); ok {
+ node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+ }
+
+ if ot, ok := node.(*ast.ObjectType); ok {
+ node = ot.List
+ }
+
+ n, ok := node.(*ast.ObjectList)
+ if !ok {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
+ }
+ }
+
+ // If we have an interface, then we can address the interface,
+ // but not the map itself, so get the element but set the interface
+ set := result
+ if result.Kind() == reflect.Interface {
+ result = result.Elem()
+ }
+
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ resultKeyType := resultType.Key()
+ if resultKeyType.Kind() != reflect.String {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: map must have string keys", name),
+ }
+ }
+
+ // Make a map if it is nil
+ resultMap := result
+ if result.IsNil() {
+ resultMap = reflect.MakeMap(
+ reflect.MapOf(resultKeyType, resultElemType))
+ }
+
+ // Go through each element and decode it.
+ done := make(map[string]struct{})
+ for _, item := range n.Items {
+ if item.Val == nil {
+ continue
+ }
+
+ // github.com/hashicorp/terraform/issue/5740
+ if len(item.Keys) == 0 {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: map must have string keys", name),
+ }
+ }
+
+ // Get the key we're dealing with, which is the first item
+ keyStr := item.Keys[0].Token.Value().(string)
+
+ // If we've already processed this key, then ignore it
+ if _, ok := done[keyStr]; ok {
+ continue
+ }
+
+ // Determine the value. If we have more than one key, then we
+ // get the objectlist of only these keys.
+ itemVal := item.Val
+ if len(item.Keys) > 1 {
+ itemVal = n.Filter(keyStr)
+ done[keyStr] = struct{}{}
+ }
+
+ // Make the field name
+ fieldName := fmt.Sprintf("%s.%s", name, keyStr)
+
+ // Get the key/value as reflection values
+ key := reflect.ValueOf(keyStr)
+ val := reflect.Indirect(reflect.New(resultElemType))
+
+ // If we have a pre-existing value in the map, use that
+ oldVal := resultMap.MapIndex(key)
+ if oldVal.IsValid() {
+ val.Set(oldVal)
+ }
+
+ // Decode!
+ if err := d.decode(fieldName, itemVal, val); err != nil {
+ return err
+ }
+
+ // Set the value on the map
+ resultMap.SetMapIndex(key, val)
+ }
+
+ // Set the final map if we can
+ set.Set(resultMap)
+ return nil
+}
+
+func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
+ // Create an element of the concrete (non pointer) type and decode
+ // into that. Then set the value of the pointer to this type.
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ val := reflect.New(resultElemType)
+ if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
+ return err
+ }
+
+ result.Set(val)
+ return nil
+}
+
+func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
+ // If we have an interface, then we can address the interface,
+ // but not the slice itself, so get the element but set the interface
+ set := result
+ if result.Kind() == reflect.Interface {
+ result = result.Elem()
+ }
+ // Create the slice if it is nil
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ if result.IsNil() {
+ resultSliceType := reflect.SliceOf(resultElemType)
+ result = reflect.MakeSlice(
+ resultSliceType, 0, 0)
+ }
+
+ // Figure out the items we'll be copying into the slice
+ var items []ast.Node
+ switch n := node.(type) {
+ case *ast.ObjectList:
+ items = make([]ast.Node, len(n.Items))
+ for i, item := range n.Items {
+ items[i] = item
+ }
+ case *ast.ObjectType:
+ items = []ast.Node{n}
+ case *ast.ListType:
+ items = n.List
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("unknown slice type: %T", node),
+ }
+ }
+
+ for i, item := range items {
+ fieldName := fmt.Sprintf("%s[%d]", name, i)
+
+ // Decode
+ val := reflect.Indirect(reflect.New(resultElemType))
+
+ // if item is an object that was decoded from ambiguous JSON and
+ // flattened, make sure it's expanded if it needs to decode into a
+ // defined structure.
+ item := expandObject(item, val)
+
+ if err := d.decode(fieldName, item, val); err != nil {
+ return err
+ }
+
+ // Append it onto the slice
+ result = reflect.Append(result, val)
+ }
+
+ set.Set(result)
+ return nil
+}
+
+// expandObject detects if an ambiguous JSON object was flattened to a List which
+// should be decoded into a struct, and expands the ast to decode it properly.
+func expandObject(node ast.Node, result reflect.Value) ast.Node {
+ item, ok := node.(*ast.ObjectItem)
+ if !ok {
+ return node
+ }
+
+ elemType := result.Type()
+
+ // our target type must be a struct
+ switch elemType.Kind() {
+ case reflect.Ptr:
+ switch elemType.Elem().Kind() {
+ case reflect.Struct:
+ //OK
+ default:
+ return node
+ }
+ case reflect.Struct:
+ //OK
+ default:
+ return node
+ }
+
+ // A list value will have a key and field name. If it had more fields,
+ // it wouldn't have been flattened.
+ if len(item.Keys) != 2 {
+ return node
+ }
+
+ keyToken := item.Keys[0].Token
+ item.Keys = item.Keys[1:]
+
+ // we need to un-flatten the ast enough to decode
+ newNode := &ast.ObjectItem{
+ Keys: []*ast.ObjectKey{
+ &ast.ObjectKey{
+ Token: keyToken,
+ },
+ },
+ Val: &ast.ObjectType{
+ List: &ast.ObjectList{
+ Items: []*ast.ObjectItem{item},
+ },
+ },
+ }
+
+ return newNode
+}
+
+func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.NUMBER:
+ result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
+ return nil
+ case token.STRING, token.HEREDOC:
+ result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type for string %T", name, node),
+ }
+}
+
+func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
+ var item *ast.ObjectItem
+ if it, ok := node.(*ast.ObjectItem); ok {
+ item = it
+ node = it.Val
+ }
+
+ if ot, ok := node.(*ast.ObjectType); ok {
+ node = ot.List
+ }
+
+ // Handle the special case where the object itself is a literal. Previously
+ // the yacc parser would always ensure top-level elements were arrays. The new
+ // parser does not make the same guarantees, thus we need to convert any
+ // top-level literal elements into a list.
+ if _, ok := node.(*ast.LiteralType); ok && item != nil {
+ node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+ }
+
+ list, ok := node.(*ast.ObjectList)
+ if !ok {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
+ }
+ }
+
+ // This slice will keep track of all the structs we'll be decoding.
+ // There can be more than one struct if there are embedded structs
+ // that are squashed.
+ structs := make([]reflect.Value, 1, 5)
+ structs[0] = result
+
+ // Compile the list of all the fields that we're going to be decoding
+ // from all the structs.
+ fields := make(map[*reflect.StructField]reflect.Value)
+ for len(structs) > 0 {
+ structVal := structs[0]
+ structs = structs[1:]
+
+ structType := structVal.Type()
+ for i := 0; i < structType.NumField(); i++ {
+ fieldType := structType.Field(i)
+ tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
+
+ // Ignore fields with tag name "-"
+ if tagParts[0] == "-" {
+ continue
+ }
+
+ if fieldType.Anonymous {
+ fieldKind := fieldType.Type.Kind()
+ if fieldKind != reflect.Struct {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unsupported type to struct: %s",
+ fieldType.Name, fieldKind),
+ }
+ }
+
+ // We have an embedded field. We "squash" the fields down
+ // if specified in the tag.
+ squash := false
+ for _, tag := range tagParts[1:] {
+ if tag == "squash" {
+ squash = true
+ break
+ }
+ }
+
+ if squash {
+ structs = append(
+ structs, result.FieldByName(fieldType.Name))
+ continue
+ }
+ }
+
+ // Normal struct field, store it away
+ fields[&fieldType] = structVal.Field(i)
+ }
+ }
+
+ usedKeys := make(map[string]struct{})
+ decodedFields := make([]string, 0, len(fields))
+ decodedFieldsVal := make([]reflect.Value, 0)
+ unusedKeysVal := make([]reflect.Value, 0)
+ for fieldType, field := range fields {
+ if !field.IsValid() {
+ // This should never happen
+ panic("field is not valid")
+ }
+
+ // If we can't set the field, then it is unexported or something,
+ // and we just continue onwards.
+ if !field.CanSet() {
+ continue
+ }
+
+ fieldName := fieldType.Name
+
+ tagValue := fieldType.Tag.Get(tagName)
+ tagParts := strings.SplitN(tagValue, ",", 2)
+ if len(tagParts) >= 2 {
+ switch tagParts[1] {
+ case "decodedFields":
+ decodedFieldsVal = append(decodedFieldsVal, field)
+ continue
+ case "key":
+ if item == nil {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: %s asked for 'key', impossible",
+ name, fieldName),
+ }
+ }
+
+ field.SetString(item.Keys[0].Token.Value().(string))
+ continue
+ case "unusedKeys":
+ unusedKeysVal = append(unusedKeysVal, field)
+ continue
+ }
+ }
+
+ if tagParts[0] != "" {
+ fieldName = tagParts[0]
+ }
+
+ // Determine the element we'll use to decode. If it is a single
+ // match (only object with the field), then we decode it exactly.
+ // If it is a prefix match, then we decode the matches.
+ filter := list.Filter(fieldName)
+
+ prefixMatches := filter.Children()
+ matches := filter.Elem()
+ if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
+ continue
+ }
+
+ // Track the used key
+ usedKeys[fieldName] = struct{}{}
+
+ // Create the field name and decode. We range over the elements
+ // because we actually want the value.
+ fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+ if len(prefixMatches.Items) > 0 {
+ if err := d.decode(fieldName, prefixMatches, field); err != nil {
+ return err
+ }
+ }
+ for _, match := range matches.Items {
+ var decodeNode ast.Node = match.Val
+ if ot, ok := decodeNode.(*ast.ObjectType); ok {
+ decodeNode = &ast.ObjectList{Items: ot.List.Items}
+ }
+
+ if err := d.decode(fieldName, decodeNode, field); err != nil {
+ return err
+ }
+ }
+
+ decodedFields = append(decodedFields, fieldType.Name)
+ }
+
+ if len(decodedFieldsVal) > 0 {
+ // Sort it so that it is deterministic
+ sort.Strings(decodedFields)
+
+ for _, v := range decodedFieldsVal {
+ v.Set(reflect.ValueOf(decodedFields))
+ }
+ }
+
+ return nil
+}
+
+// findNodeType returns the type of ast.Node
+func findNodeType() reflect.Type {
+ var nodeContainer struct {
+ Node ast.Node
+ }
+ value := reflect.ValueOf(nodeContainer).FieldByName("Node")
+ return value.Type()
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go
new file mode 100644
index 00000000..575a20b5
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl.go
@@ -0,0 +1,11 @@
+// Package hcl decodes HCL into usable Go structures.
+//
+// HCL input can come in either pure HCL format or JSON format.
+// It can be parsed into an AST, and then decoded into a structure,
+// or it can be decoded directly from a string into a structure.
+//
+// If you choose to parse HCL into a raw AST, the benefit is that you
+// can write custom visitor implementations to implement custom
+// semantic checks. By default, HCL does not perform any semantic
+// checks.
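+//
+// As an editorial illustration (not from the upstream file), the two paths
+// look roughly like:
+//
+// // decode directly from a string
+// var out Config
+// err := hcl.Decode(&out, input)
+//
+// // or parse to an AST first, then decode
+// file, err := hcl.Parse(input)
+// // ... inspect or rewrite the AST ...
+// err = hcl.DecodeObject(&out, file)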
+package hcl
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
new file mode 100644
index 00000000..6e5ef654
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
@@ -0,0 +1,219 @@
+// Package ast declares the types used to represent syntax trees for HCL
+// (HashiCorp Configuration Language)
+package ast
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Node is an element in the abstract syntax tree.
+type Node interface {
+ node()
+ Pos() token.Pos
+}
+
+func (File) node() {}
+func (ObjectList) node() {}
+func (ObjectKey) node() {}
+func (ObjectItem) node() {}
+func (Comment) node() {}
+func (CommentGroup) node() {}
+func (ObjectType) node() {}
+func (LiteralType) node() {}
+func (ListType) node() {}
+
+// File represents a single HCL file
+type File struct {
+ Node Node // usually a *ObjectList
+ Comments []*CommentGroup // list of all comments in the source
+}
+
+func (f *File) Pos() token.Pos {
+ return f.Node.Pos()
+}
+
+// ObjectList represents a list of ObjectItems. An HCL file itself is an
+// ObjectList.
+type ObjectList struct {
+ Items []*ObjectItem
+}
+
+func (o *ObjectList) Add(item *ObjectItem) {
+ o.Items = append(o.Items, item)
+}
+
+// Filter returns the objects whose keys have the given key list as a prefix.
+//
+// The returned list of objects contain ObjectItems where the keys have
+// this prefix already stripped off. This might result in objects with
+// zero-length key lists if they have no children.
+//
+// If no matches are found, an empty ObjectList (non-nil) is returned.
+func (o *ObjectList) Filter(keys ...string) *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ // If there aren't enough keys, then ignore this
+ if len(item.Keys) < len(keys) {
+ continue
+ }
+
+ match := true
+ for i, key := range item.Keys[:len(keys)] {
+ key := key.Token.Value().(string)
+ if key != keys[i] && !strings.EqualFold(key, keys[i]) {
+ match = false
+ break
+ }
+ }
+ if !match {
+ continue
+ }
+
+ // Strip off the prefix from the children
+ newItem := *item
+ newItem.Keys = newItem.Keys[len(keys):]
+ result.Add(&newItem)
+ }
+
+ return &result
+}
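+
+// (Illustrative, not upstream: for an item parsed from `service "web" {}`,
+// Filter("service") returns that item with its keys reduced to ["web"];
+// Children and Elem below then separate nested blocks from plain
+// assignments.)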
+
+// Children returns further nested objects (key length > 0) within this
+// ObjectList. This should be used with Filter to get at child items.
+func (o *ObjectList) Children() *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ if len(item.Keys) > 0 {
+ result.Add(item)
+ }
+ }
+
+ return &result
+}
+
+// Elem returns items in the list that are direct element assignments
+// (key length == 0). This should be used with Filter to get at elements.
+func (o *ObjectList) Elem() *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ if len(item.Keys) == 0 {
+ result.Add(item)
+ }
+ }
+
+ return &result
+}
+
+func (o *ObjectList) Pos() token.Pos {
+ // Pos returns the position of the first item
+ return o.Items[0].Pos()
+}
+
+// ObjectItem represents an HCL Object Item. An item is represented with a key
+// (or keys). It can be an assignment or an object (both normal and nested)
+type ObjectItem struct {
+ // keys is of length one if the item is an assignment. If it's a
+ // nested object it can be longer than one. In that case "assign" is
+ // invalid, as there are no assignments for a nested object.
+ Keys []*ObjectKey
+
+ // assign contains the position of "=", if any
+ Assign token.Pos
+
+ // val is the item itself. It can be an object, list, number, bool or a
+ // string. If key length is larger than one, val can be only of type
+ // Object.
+ Val Node
+
+ LeadComment *CommentGroup // associated lead comment
+ LineComment *CommentGroup // associated line comment
+}
+
+func (o *ObjectItem) Pos() token.Pos {
+ // I'm not entirely sure what causes this, but removing this causes
+ // a test failure. We should investigate at some point.
+ if len(o.Keys) == 0 {
+ return token.Pos{}
+ }
+
+ return o.Keys[0].Pos()
+}
+
+// An ObjectKey is either an identifier or a string.
+type ObjectKey struct {
+ Token token.Token
+}
+
+func (o *ObjectKey) Pos() token.Pos {
+ return o.Token.Pos
+}
+
+// LiteralType represents a literal of basic type. Valid types are:
+// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
+type LiteralType struct {
+ Token token.Token
+
+ // comment types, only used when in a list
+ LeadComment *CommentGroup
+ LineComment *CommentGroup
+}
+
+func (l *LiteralType) Pos() token.Pos {
+ return l.Token.Pos
+}
+
+// ListType represents an HCL list type
+type ListType struct {
+ Lbrack token.Pos // position of "["
+ Rbrack token.Pos // position of "]"
+ List []Node // the elements in lexical order
+}
+
+func (l *ListType) Pos() token.Pos {
+ return l.Lbrack
+}
+
+func (l *ListType) Add(node Node) {
+ l.List = append(l.List, node)
+}
+
+// ObjectType represents an HCL Object Type
+type ObjectType struct {
+ Lbrace token.Pos // position of "{"
+ Rbrace token.Pos // position of "}"
+ List *ObjectList // the nodes in lexical order
+}
+
+func (o *ObjectType) Pos() token.Pos {
+ return o.Lbrace
+}
+
+// Comment node represents a single //, # style or /* */ style comment
+type Comment struct {
+ Start token.Pos // position of / or #
+ Text string
+}
+
+func (c *Comment) Pos() token.Pos {
+ return c.Start
+}
+
+// CommentGroup node represents a sequence of comments with no other tokens and
+// no empty lines between.
+type CommentGroup struct {
+ List []*Comment // len(List) > 0
+}
+
+func (c *CommentGroup) Pos() token.Pos {
+ return c.List[0].Pos()
+}
+
+//-------------------------------------------------------------------
+// GoStringer
+//-------------------------------------------------------------------
+
+func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
+func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
new file mode 100644
index 00000000..ba07ad42
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
@@ -0,0 +1,52 @@
+package ast
+
+import "fmt"
+
+// WalkFunc describes a function to be called for each node during a Walk. The
+// returned node can be used to rewrite the AST. Walking stops if the
+// returned bool is false.
+type WalkFunc func(Node) (Node, bool)
+
+// Walk traverses an AST in depth-first order: It starts by calling fn(node);
+// node must not be nil. If fn returns true, Walk invokes fn recursively for
+// each of the non-nil children of node, followed by a call of fn(nil). The
+// returned node of fn can be used to rewrite the node passed to fn.
+func Walk(node Node, fn WalkFunc) Node {
+ rewritten, ok := fn(node)
+ if !ok {
+ return rewritten
+ }
+
+ switch n := node.(type) {
+ case *File:
+ n.Node = Walk(n.Node, fn)
+ case *ObjectList:
+ for i, item := range n.Items {
+ n.Items[i] = Walk(item, fn).(*ObjectItem)
+ }
+ case *ObjectKey:
+ // nothing to do
+ case *ObjectItem:
+ for i, k := range n.Keys {
+ n.Keys[i] = Walk(k, fn).(*ObjectKey)
+ }
+
+ if n.Val != nil {
+ n.Val = Walk(n.Val, fn)
+ }
+ case *LiteralType:
+ // nothing to do
+ case *ListType:
+ for i, l := range n.List {
+ n.List[i] = Walk(l, fn)
+ }
+ case *ObjectType:
+ n.List = Walk(n.List, fn).(*ObjectList)
+ default:
+ // should we panic here?
+ fmt.Printf("unknown type: %T\n", n)
+ }
+
+ fn(nil)
+ return rewritten
+}
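+
+// Example (editorial sketch, not part of the upstream file): counting the
+// literal nodes in a parsed file with a closure. Returning (n, true) leaves
+// each node unchanged and continues the traversal.
+//
+// count := 0
+// Walk(file.Node, func(n Node) (Node, bool) {
+// if _, ok := n.(*LiteralType); ok {
+// count++
+// }
+// return n, true
+// })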
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
new file mode 100644
index 00000000..5c99381d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
@@ -0,0 +1,17 @@
+package parser
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// PosError is a parse error that contains a position.
+type PosError struct {
+ Pos token.Pos
+ Err error
+}
+
+func (e *PosError) Error() string {
+ return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
new file mode 100644
index 00000000..6e54bed9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -0,0 +1,514 @@
+// Package parser implements a parser for HCL (HashiCorp Configuration
+// Language)
+package parser
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/scanner"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+type Parser struct {
+ sc *scanner.Scanner
+
+ // Last read token
+ tok token.Token
+ commaPrev token.Token
+
+ comments []*ast.CommentGroup
+ leadComment *ast.CommentGroup // last lead comment
+ lineComment *ast.CommentGroup // last line comment
+
+ enableTrace bool
+ indent int
+ n int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+ return &Parser{
+ sc: scanner.New(src),
+ }
+}
+
+// Parse parses the given source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+ p := newParser(src)
+ return p.Parse()
+}
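+
+// (Illustrative, not upstream: a minimal call looks like
+//
+// f, err := Parse([]byte(`foo = "bar"`))
+//
+// where f.Node is usually an *ast.ObjectList, per the ast package.)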
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the given source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+ f := &ast.File{}
+ var err, scerr error
+ p.sc.Error = func(pos token.Pos, msg string) {
+ scerr = &PosError{Pos: pos, Err: errors.New(msg)}
+ }
+
+ f.Node, err = p.objectList(false)
+ if scerr != nil {
+ return nil, scerr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ f.Comments = p.comments
+ return f, nil
+}
+
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter" obj" tells this whether to we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
+ defer un(trace(p, "ParseObjectList"))
+ node := &ast.ObjectList{}
+
+ for {
+ if obj {
+ tok := p.scan()
+ p.unscan()
+ if tok.Type == token.RBRACE {
+ break
+ }
+ }
+
+ n, err := p.objectItem()
+ if err == errEofToken {
+ break // we are finished
+ }
+
+ // we don't return a nil node, because we might want to use the
+ // already collected items.
+ if err != nil {
+ return node, err
+ }
+
+ node.Add(n)
+
+ // object lists can be optionally comma-delimited e.g. when a list of maps
+ // is being expressed, so a comma is allowed here - it's simply consumed
+ tok := p.scan()
+ if tok.Type != token.COMMA {
+ p.unscan()
+ }
+ }
+ return node, nil
+}
+
+func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
+ endline = p.tok.Pos.Line
+
+ // count the endline if it's a multiline comment, i.e. starting with /*
+ if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
+ // don't use range here - no need to decode Unicode code points
+ for i := 0; i < len(p.tok.Text); i++ {
+ if p.tok.Text[i] == '\n' {
+ endline++
+ }
+ }
+ }
+
+ comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
+ p.tok = p.sc.Scan()
+ return
+}
+
+func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+ var list []*ast.Comment
+ endline = p.tok.Pos.Line
+
+ for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
+ var comment *ast.Comment
+ comment, endline = p.consumeComment()
+ list = append(list, comment)
+ }
+
+ // add comment group to the comments list
+ comments = &ast.CommentGroup{List: list}
+ p.comments = append(p.comments, comments)
+
+ return
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+ defer un(trace(p, "ParseObjectItem"))
+
+ keys, err := p.objectKey()
+ if len(keys) > 0 && err == errEofToken {
+ // We ignore eof token here since it is an error if we didn't
+ // receive a value (but we did receive a key) for the item.
+ err = nil
+ }
+ if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
+ // This is a strange boolean statement, but what it means is:
+ // We have keys with no value, and we're likely in an object
+ // (since RBrace ends an object). For this, we set err to nil so
+ // we continue and get the error below of having the wrong value
+ // type.
+ err = nil
+
+ // Reset the token type so we don't think it completed fine. See
+ // objectType which uses p.tok.Type to check if we're done with
+ // the object.
+ p.tok.Type = token.EOF
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ o := &ast.ObjectItem{
+ Keys: keys,
+ }
+
+ if p.leadComment != nil {
+ o.LeadComment = p.leadComment
+ p.leadComment = nil
+ }
+
+ switch p.tok.Type {
+ case token.ASSIGN:
+ o.Assign = p.tok.Pos
+ o.Val, err = p.object()
+ if err != nil {
+ return nil, err
+ }
+ case token.LBRACE:
+ o.Val, err = p.objectType()
+ if err != nil {
+ return nil, err
+ }
+ default:
+ keyStr := make([]string, 0, len(keys))
+ for _, k := range keys {
+ keyStr = append(keyStr, k.Token.Text)
+ }
+
+ return nil, fmt.Errorf(
+ "key '%s' expected start of object ('{') or assignment ('=')",
+ strings.Join(keyStr, " "))
+ }
+
+ // do a look-ahead for line comment
+ p.scan()
+ if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
+ o.LineComment = p.lineComment
+ p.lineComment = nil
+ }
+ p.unscan()
+ return o, nil
+}
+
+// objectKey parses an object key and returns a list of ObjectKey ASTs
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+ keyCount := 0
+ keys := make([]*ast.ObjectKey, 0)
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.EOF:
+ // It is very important to also return the keys here as well as
+ // the error. This is because we need to be able to tell if we
+ // did parse keys prior to finding the EOF, or if we just found
+ // a bare EOF.
+ return keys, errEofToken
+ case token.ASSIGN:
+ // assignment or object only, but not nested objects. This is not
+ // allowed: `foo bar = {}`
+ if keyCount > 1 {
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
+ }
+ }
+
+ if keyCount == 0 {
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: errors.New("no object keys found!"),
+ }
+ }
+
+ return keys, nil
+ case token.LBRACE:
+ var err error
+
+ // If we have no keys, then it is a syntax error, i.e. {{}} is not
+ // allowed.
+ if len(keys) == 0 {
+ err = &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
+ }
+ }
+
+ // object
+ return keys, err
+ case token.IDENT, token.STRING:
+ keyCount++
+ keys = append(keys, &ast.ObjectKey{Token: p.tok})
+ case token.ILLEGAL:
+ return keys, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("illegal character"),
+ }
+ default:
+ return keys, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
+ }
+ }
+ }
+}
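+
+// (Illustrative, not upstream: objectKey accepts key sequences such as
+//
+// foo = ... // a single IDENT key followed by an assignment
+// service "web" { // an IDENT key plus a STRING key, opening a nested object
+//
+// while forms like `foo bar = {}` are rejected by the keyCount check above.)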
+
+// object parses any type of object, such as number, bool, string, object or
+// list.
+func (p *Parser) object() (ast.Node, error) {
+ defer un(trace(p, "ParseType"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
+ return p.literalType()
+ case token.LBRACE:
+ return p.objectType()
+ case token.LBRACK:
+ return p.listType()
+ case token.COMMENT:
+ // implement comment
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("Unknown token: %+v", tok),
+ }
+}
+
+// objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseObjectType"))
+
+ // we assume that the currently scanned token is a LBRACE
+ o := &ast.ObjectType{
+ Lbrace: p.tok.Pos,
+ }
+
+ l, err := p.objectList(true)
+
+ // if we hit RBRACE, we are good to go (it means we parsed all items); if
+ // it's not an RBRACE, it's a syntax error and we just return it.
+ if err != nil && p.tok.Type != token.RBRACE {
+ return nil, err
+ }
+
+ // No error, scan and expect the ending to be a brace
+ if tok := p.scan(); tok.Type != token.RBRACE {
+ return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type)
+ }
+
+ o.List = l
+ o.Rbrace = p.tok.Pos // advanced via parseObjectList
+ return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+ defer un(trace(p, "ParseListType"))
+
+ // we assume that the currently scanned token is a LBRACK
+ l := &ast.ListType{
+ Lbrack: p.tok.Pos,
+ }
+
+ needComma := false
+ for {
+ tok := p.scan()
+ if needComma {
+ switch tok.Type {
+ case token.COMMA, token.RBRACK:
+ default:
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error parsing list, expected comma or list end, got: %s",
+ tok.Type),
+ }
+ }
+ }
+ switch tok.Type {
+ case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
+ node, err := p.literalType()
+ if err != nil {
+ return nil, err
+ }
+
+ // If there is a lead comment, apply it
+ if p.leadComment != nil {
+ node.LeadComment = p.leadComment
+ p.leadComment = nil
+ }
+
+ l.Add(node)
+ needComma = true
+ case token.COMMA:
+ // get next list item or we are at the end
+ // do a look-ahead for line comment
+ p.scan()
+ if p.lineComment != nil && len(l.List) > 0 {
+ lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
+ if ok {
+ lit.LineComment = p.lineComment
+ l.List[len(l.List)-1] = lit
+ p.lineComment = nil
+ }
+ }
+ p.unscan()
+
+ needComma = false
+ continue
+ case token.LBRACE:
+ // Looks like a nested object, so parse it out
+ node, err := p.objectType()
+ if err != nil {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error while trying to parse object within list: %s", err),
+ }
+ }
+ l.Add(node)
+ needComma = true
+ case token.LBRACK:
+ node, err := p.listType()
+ if err != nil {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error while trying to parse list within list: %s", err),
+ }
+ }
+ l.Add(node)
+ case token.RBRACK:
+ // finished
+ l.Rbrack = p.tok.Pos
+ return l, nil
+ default:
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
+ }
+ }
+ }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+ defer un(trace(p, "ParseLiteral"))
+
+ return &ast.LiteralType{
+ Token: p.tok,
+ }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead. In the process, it collects any
+// comment groups encountered, and remembers the last lead and line comments.
+func (p *Parser) scan() token.Token {
+ // If we have a token on the buffer, then return it.
+ if p.n != 0 {
+ p.n = 0
+ return p.tok
+ }
+
+ // Otherwise read the next token from the scanner and save it to the buffer
+ // in case we unscan later.
+ prev := p.tok
+ p.tok = p.sc.Scan()
+
+ if p.tok.Type == token.COMMENT {
+ var comment *ast.CommentGroup
+ var endline int
+
+ // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
+ // p.tok.Pos.Line, prev.Pos.Line, endline)
+ if p.tok.Pos.Line == prev.Pos.Line {
+ // The comment is on the same line as the previous token; it
+ // cannot be a lead comment but may be a line comment.
+ comment, endline = p.consumeCommentGroup(0)
+ if p.tok.Pos.Line != endline {
+ // The next token is on a different line, thus
+ // the last comment group is a line comment.
+ p.lineComment = comment
+ }
+ }
+
+ // consume successor comments, if any
+ endline = -1
+ for p.tok.Type == token.COMMENT {
+ comment, endline = p.consumeCommentGroup(1)
+ }
+
+ if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
+ switch p.tok.Type {
+ case token.RBRACE, token.RBRACK:
+ // Do not count for these cases
+ default:
+ // The next token is following on the line immediately after the
+ // comment group, thus the last comment group is a lead comment.
+ p.leadComment = comment
+ }
+ }
+
+ }
+
+ return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+ p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+ p.indent--
+ p.printTrace(")")
+}
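
To make the parser code above easier to review, here is a minimal usage sketch. It is not part of the vendored sources and assumes the usual hashicorp/hcl import paths of this vendor tree: it feeds a small HCL document through this package's exported Parse and walks the resulting object items, exercising the listType and literalType paths shown above.

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/hcl/hcl/ast"
        "github.com/hashicorp/hcl/hcl/parser"
    )

    func main() {
        // A small HCL document with a list and a string literal.
        src := []byte("ports = [80, 443]\nname = \"web\"")

        f, err := parser.Parse(src)
        if err != nil {
            log.Fatal(err)
        }

        // The root node of a parsed HCL file is an *ast.ObjectList.
        for _, item := range f.Node.(*ast.ObjectList).Items {
            fmt.Println("key:", item.Keys[0].Token.Text)
        }
    }
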
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
new file mode 100644
index 00000000..69662367
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
@@ -0,0 +1,651 @@
+// Package scanner implements a scanner for HCL (HashiCorp Configuration
+// Language) source text.
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "regexp"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+ buf *bytes.Buffer // Source buffer for advancing and scanning
+ src []byte // Source buffer for immutable access
+
+ // Source Position
+ srcPos token.Pos // current position
+ prevPos token.Pos // previous position, used for peek() method
+
+ lastCharLen int // length of last character in bytes
+ lastLineLen int // length of last line in characters (for correct column reporting)
+
+ tokStart int // token text start position
+ tokEnd int // token text end position
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(pos token.Pos, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+ // tokPos is the start position of the most recently scanned token; set by
+ // Scan. The Filename field is always left untouched by the Scanner. If
+ // an error is reported (via Error) and tokPos is invalid, the scanner is
+ // not inside a token.
+ tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+ // even though we accept a src, we read from an io.Reader compatible type
+ // (*bytes.Buffer). So in the future we might easily change it to a
+ // streaming read.
+ b := bytes.NewBuffer(src)
+ s := &Scanner{
+ buf: b,
+ src: src,
+ }
+
+ // the source position always starts at line 1
+ s.srcPos.Line = 1
+ return s
+}
+
+ // next reads the next rune from the buffered reader. Returns rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+ ch, size, err := s.buf.ReadRune()
+ if err != nil {
+ // advance for error reporting
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ return eof
+ }
+
+ if ch == utf8.RuneError && size == 1 {
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ s.err("illegal UTF-8 encoding")
+ return ch
+ }
+
+ // remember last position
+ s.prevPos = s.srcPos
+
+ s.srcPos.Column++
+ s.lastCharLen = size
+ s.srcPos.Offset += size
+
+ if ch == '\n' {
+ s.srcPos.Line++
+ s.lastLineLen = s.srcPos.Column
+ s.srcPos.Column = 0
+ }
+
+ // If we see a null character with data left, then that is an error
+ if ch == '\x00' && s.buf.Len() > 0 {
+ s.err("unexpected null character (0x00)")
+ return eof
+ }
+
+ // debug
+ // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+ return ch
+}
+
+ // unread unreads the previously read rune and updates the source position
+func (s *Scanner) unread() {
+ if err := s.buf.UnreadRune(); err != nil {
+ panic(err) // this is user fault, we should catch it
+ }
+ s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+ peek, _, err := s.buf.ReadRune()
+ if err != nil {
+ return eof
+ }
+
+ s.buf.UnreadRune()
+ return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+ ch := s.next()
+
+ // skip white space
+ for isWhitespace(ch) {
+ ch = s.next()
+ }
+
+ var tok token.Type
+
+ // token text markings
+ s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+ // token position: the initial next() moves the offset by one (the size of the
+ // rune, actually), but we are interested in the starting point
+ s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+ if s.srcPos.Column > 0 {
+ // common case: last character was not a '\n'
+ s.tokPos.Line = s.srcPos.Line
+ s.tokPos.Column = s.srcPos.Column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.tokPos.Line = s.srcPos.Line - 1
+ s.tokPos.Column = s.lastLineLen
+ }
+
+ switch {
+ case isLetter(ch):
+ tok = token.IDENT
+ lit := s.scanIdentifier()
+ if lit == "true" || lit == "false" {
+ tok = token.BOOL
+ }
+ case isDecimal(ch):
+ tok = s.scanNumber(ch)
+ default:
+ switch ch {
+ case eof:
+ tok = token.EOF
+ case '"':
+ tok = token.STRING
+ s.scanString()
+ case '#', '/':
+ tok = token.COMMENT
+ s.scanComment(ch)
+ case '.':
+ tok = token.PERIOD
+ ch = s.peek()
+ if isDecimal(ch) {
+ tok = token.FLOAT
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '<':
+ tok = token.HEREDOC
+ s.scanHeredoc()
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ tok = token.RBRACE
+ case ',':
+ tok = token.COMMA
+ case '=':
+ tok = token.ASSIGN
+ case '+':
+ tok = token.ADD
+ case '-':
+ if isDecimal(s.peek()) {
+ ch := s.next()
+ tok = s.scanNumber(ch)
+ } else {
+ tok = token.SUB
+ }
+ default:
+ s.err("illegal char")
+ }
+ }
+
+ // finish token ending
+ s.tokEnd = s.srcPos.Offset
+
+ // create token literal
+ var tokenText string
+ if s.tokStart >= 0 {
+ tokenText = string(s.src[s.tokStart:s.tokEnd])
+ }
+ s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+ return token.Token{
+ Type: tok,
+ Pos: s.tokPos,
+ Text: tokenText,
+ }
+}
+
+func (s *Scanner) scanComment(ch rune) {
+ // single line comments
+ if ch == '#' || (ch == '/' && s.peek() != '*') {
+ if ch == '/' && s.peek() != '/' {
+ s.err("expected '/' for comment")
+ return
+ }
+
+ ch = s.next()
+ for ch != '\n' && ch >= 0 && ch != eof {
+ ch = s.next()
+ }
+ if ch != eof && ch >= 0 {
+ s.unread()
+ }
+ return
+ }
+
+ // be sure we get the character after /*. This allows us to find comments
+ // that are not terminated
+ if ch == '/' {
+ s.next()
+ ch = s.next() // read character after "/*"
+ }
+
+ // look for /* - style comments
+ for {
+ if ch < 0 || ch == eof {
+ s.err("comment not terminated")
+ break
+ }
+
+ ch0 := ch
+ ch = s.next()
+ if ch0 == '*' && ch == '/' {
+ break
+ }
+ }
+}
+
+// scanNumber scans a HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+ if ch == '0' {
+ // check for hexadecimal, octal or float
+ ch = s.next()
+ if ch == 'x' || ch == 'X' {
+ // hexadecimal
+ ch = s.next()
+ found := false
+ for isHexadecimal(ch) {
+ ch = s.next()
+ found = true
+ }
+
+ if !found {
+ s.err("illegal hexadecimal number")
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+
+ return token.NUMBER
+ }
+
+ // now it's either something like: 0421 (octal) or 0.1231 (float)
+ illegalOctal := false
+ for isDecimal(ch) {
+ ch = s.next()
+ if ch == '8' || ch == '9' {
+ // this is just a possibility. For example 0159 is illegal, but
+ // 0159.23 is valid. So we mark a possible illegal octal. If
+ // the next character is not a period, we'll print the error.
+ illegalOctal = true
+ }
+ }
+
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if illegalOctal {
+ s.err("illegal octal number")
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+ return token.NUMBER
+ }
+
+ s.scanMantissa(ch)
+ ch = s.next() // seek forward
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+ return token.NUMBER
+}
+
+ // scanMantissa scans the mantissa beginning from the rune. It returns the next
+ // non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+ scanned := false
+ for isDecimal(ch) {
+ ch = s.next()
+ scanned = true
+ }
+
+ if scanned && ch != eof {
+ s.unread()
+ }
+ return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.peek() // we peek just to see if we can move forward
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanHeredoc scans a heredoc string
+func (s *Scanner) scanHeredoc() {
+ // Scan the second '<' in example: '<<EOF'
+ if s.next() != '<' {
+ s.err("heredoc expected second '<', didn't see it")
+ return
+ }
+
+ // Get the original offset so we can read just the heredoc ident
+ offs := s.srcPos.Offset
+
+ // Scan the identifier
+ ch := s.next()
+
+ // Indented heredoc syntax
+ if ch == '-' {
+ ch = s.next()
+ }
+
+ for isLetter(ch) || isDigit(ch) {
+ ch = s.next()
+ }
+
+ // If we reached an EOF then that is not good
+ if ch == eof {
+ s.err("heredoc not terminated")
+ return
+ }
+
+ // Ignore the '\r' in Windows line endings
+ if ch == '\r' {
+ if s.peek() == '\n' {
+ ch = s.next()
+ }
+ }
+
+ // If we didn't reach a newline then that is also not good
+ if ch != '\n' {
+ s.err("invalid characters in heredoc anchor")
+ return
+ }
+
+ // Read the identifier
+ identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
+ if len(identBytes) == 0 {
+ s.err("zero-length heredoc anchor")
+ return
+ }
+
+ var identRegexp *regexp.Regexp
+ if identBytes[0] == '-' {
+ identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
+ } else {
+ identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
+ }
+
+ // Read the actual string value
+ lineStart := s.srcPos.Offset
+ for {
+ ch := s.next()
+
+ // Special newline handling.
+ if ch == '\n' {
+ // Math is fast, so we first compare the byte counts to see if we have a chance
+ // of seeing the same identifier - if the length is less than the number of bytes
+ // in the identifier, this cannot be a valid terminator.
+ lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
+ if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
+ break
+ }
+
+ // Not an anchor match, record the start of a new line
+ lineStart = s.srcPos.Offset
+ }
+
+ if ch == eof {
+ s.err("heredoc not terminated")
+ return
+ }
+ }
+
+ return
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+ braces := 0
+ for {
+ // '"' opening already consumed
+ // read character after quote
+ ch := s.next()
+
+ if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
+ s.err("literal not terminated")
+ return
+ }
+
+ if ch == '"' && braces == 0 {
+ break
+ }
+
+ // If we're going into a ${} then we can ignore quotes for awhile
+ if braces == 0 && ch == '$' && s.peek() == '{' {
+ braces++
+ s.next()
+ } else if braces > 0 && ch == '{' {
+ braces++
+ }
+ if braces > 0 && ch == '}' {
+ braces--
+ }
+
+ if ch == '\\' {
+ s.scanEscape()
+ }
+ }
+
+ return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+ // http://en.cppreference.com/w/cpp/language/escape
+ ch := s.next() // read character after '/'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+ // nothing to do
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // octal notation
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+ // hexadecimal notation
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.err("illegal char escape")
+ }
+ return ch
+}
+
+ // scanDigits scans a rune with the given base up to n times. For example, an
+ // octal notation \184 would result in a call to scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ start := n
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ if ch == eof {
+ // If we see an EOF, we halt any more scanning of digits
+ // immediately.
+ break
+ }
+
+ n--
+ }
+ if n > 0 {
+ s.err("illegal char escape")
+ }
+
+ if n != start {
+ // we scanned all digits; put the last non-digit char back,
+ // only if we read anything at all
+ s.unread()
+ }
+
+ return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+ offs := s.srcPos.Offset - s.lastCharLen
+ ch := s.next()
+ for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
+ ch = s.next()
+ }
+
+ if ch != eof {
+ s.unread() // we got identifier, put back latest char
+ }
+
+ return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+ pos.Offset = s.srcPos.Offset - s.lastCharLen
+ switch {
+ case s.srcPos.Column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.srcPos.Line
+ pos.Column = s.srcPos.Column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ pos.Line = s.srcPos.Line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+ // err reports a scanning error to the s.Error function. If the function is
+ // not defined, it prints the error to os.Stderr by default
+func (s *Scanner) err(msg string) {
+ s.ErrorCount++
+ pos := s.recentPosition()
+
+ if s.Error != nil {
+ s.Error(pos, msg)
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+ // isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+ // isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9'
+}
+
+ // isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+ // digitVal returns the integer value of a given octal, decimal or hexadecimal rune
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
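
As a rough sketch of how this scanner is driven (assuming the standard import paths of this vendor tree), the typical loop creates a Scanner with New and calls Scan until token.EOF:

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl/hcl/scanner"
        "github.com/hashicorp/hcl/hcl/token"
    )

    func main() {
        s := scanner.New([]byte("count = 3 + 4"))
        for {
            tok := s.Scan()
            if tok.Type == token.EOF {
                break
            }
            // Prints position, type and text, e.g. "1:1 IDENT count".
            fmt.Println(tok.String())
        }
    }
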
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
new file mode 100644
index 00000000..5f981eaa
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
@@ -0,0 +1,241 @@
+package strconv
+
+import (
+ "errors"
+ "unicode/utf8"
+)
+
+// ErrSyntax indicates that a value does not have the right syntax for the target type.
+var ErrSyntax = errors.New("invalid syntax")
+
+// Unquote interprets s as a single-quoted, double-quoted,
+// or backquoted Go string literal, returning the string value
+// that s quotes. (If s is single-quoted, it would be a Go
+// character literal; Unquote returns the corresponding
+// one-character string.)
+func Unquote(s string) (t string, err error) {
+ n := len(s)
+ if n < 2 {
+ return "", ErrSyntax
+ }
+ quote := s[0]
+ if quote != s[n-1] {
+ return "", ErrSyntax
+ }
+ s = s[1 : n-1]
+
+ if quote != '"' {
+ return "", ErrSyntax
+ }
+ if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
+ return "", ErrSyntax
+ }
+
+ // Is it trivial? Avoid allocation.
+ if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
+ switch quote {
+ case '"':
+ return s, nil
+ case '\'':
+ r, size := utf8.DecodeRuneInString(s)
+ if size == len(s) && (r != utf8.RuneError || size != 1) {
+ return s, nil
+ }
+ }
+ }
+
+ var runeTmp [utf8.UTFMax]byte
+ buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+ for len(s) > 0 {
+ // If we're starting a '${}' then let it through un-unquoted.
+ // Specifically: we don't unquote any characters within the `${}`
+ // section.
+ if s[0] == '$' && len(s) > 1 && s[1] == '{' {
+ buf = append(buf, '$', '{')
+ s = s[2:]
+
+ // Continue reading until we find the closing brace, copying as-is
+ braces := 1
+ for len(s) > 0 && braces > 0 {
+ r, size := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError {
+ return "", ErrSyntax
+ }
+
+ s = s[size:]
+
+ n := utf8.EncodeRune(runeTmp[:], r)
+ buf = append(buf, runeTmp[:n]...)
+
+ switch r {
+ case '{':
+ braces++
+ case '}':
+ braces--
+ }
+ }
+ if braces != 0 {
+ return "", ErrSyntax
+ }
+ if len(s) == 0 {
+ // If there's no string left, we're done!
+ break
+ } else {
+ // If there's more left, we need to pop back up to the top of the loop
+ // in case there's another interpolation in this string.
+ continue
+ }
+ }
+
+ if s[0] == '\n' {
+ return "", ErrSyntax
+ }
+
+ c, multibyte, ss, err := unquoteChar(s, quote)
+ if err != nil {
+ return "", err
+ }
+ s = ss
+ if c < utf8.RuneSelf || !multibyte {
+ buf = append(buf, byte(c))
+ } else {
+ n := utf8.EncodeRune(runeTmp[:], c)
+ buf = append(buf, runeTmp[:n]...)
+ }
+ if quote == '\'' && len(s) != 0 {
+ // single-quoted must be single character
+ return "", ErrSyntax
+ }
+ }
+ return string(buf), nil
+}
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return true
+ }
+ }
+ return false
+}
+
+func unhex(b byte) (v rune, ok bool) {
+ c := rune(b)
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+ return
+}
+
+func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+ // easy cases
+ switch c := s[0]; {
+ case c == quote && (quote == '\'' || quote == '"'):
+ err = ErrSyntax
+ return
+ case c >= utf8.RuneSelf:
+ r, size := utf8.DecodeRuneInString(s)
+ return r, true, s[size:], nil
+ case c != '\\':
+ return rune(s[0]), false, s[1:], nil
+ }
+
+ // hard case: c is backslash
+ if len(s) <= 1 {
+ err = ErrSyntax
+ return
+ }
+ c := s[1]
+ s = s[2:]
+
+ switch c {
+ case 'a':
+ value = '\a'
+ case 'b':
+ value = '\b'
+ case 'f':
+ value = '\f'
+ case 'n':
+ value = '\n'
+ case 'r':
+ value = '\r'
+ case 't':
+ value = '\t'
+ case 'v':
+ value = '\v'
+ case 'x', 'u', 'U':
+ n := 0
+ switch c {
+ case 'x':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ var v rune
+ if len(s) < n {
+ err = ErrSyntax
+ return
+ }
+ for j := 0; j < n; j++ {
+ x, ok := unhex(s[j])
+ if !ok {
+ err = ErrSyntax
+ return
+ }
+ v = v<<4 | x
+ }
+ s = s[n:]
+ if c == 'x' {
+ // single-byte string, possibly not UTF-8
+ value = v
+ break
+ }
+ if v > utf8.MaxRune {
+ err = ErrSyntax
+ return
+ }
+ value = v
+ multibyte = true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ v := rune(c) - '0'
+ if len(s) < 2 {
+ err = ErrSyntax
+ return
+ }
+ for j := 0; j < 2; j++ { // one digit already; two more
+ x := rune(s[j]) - '0'
+ if x < 0 || x > 7 {
+ err = ErrSyntax
+ return
+ }
+ v = (v << 3) | x
+ }
+ s = s[2:]
+ if v > 255 {
+ err = ErrSyntax
+ return
+ }
+ value = v
+ case '\\':
+ value = '\\'
+ case '\'', '"':
+ if c != quote {
+ err = ErrSyntax
+ return
+ }
+ value = rune(c)
+ default:
+ err = ErrSyntax
+ return
+ }
+ tail = s
+ return
+}
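
A short sketch of what this Unquote does differently from the standard library (import path assumed from this vendor tree): escape sequences are decoded, but bytes inside a ${...} interpolation pass through verbatim.

    package main

    import (
        "fmt"

        hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
    )

    func main() {
        // The \t escape is decoded; the ${var.name} interpolation is kept as-is.
        s, err := hclstrconv.Unquote(`"tab:\t name: ${var.name}"`)
        fmt.Printf("%q %v\n", s, err)
    }
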
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
new file mode 100644
index 00000000..59c1bb72
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+func (p Pos) String() string {
+ s := p.Filename
+ if p.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+ return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+ return u.Offset < p.Offset || u.Line < p.Line
+}
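
A tiny sketch of the documented String forms:

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl/hcl/token"
    )

    func main() {
        p := token.Pos{Filename: "main.tf", Line: 3, Column: 7}
        fmt.Println(p.String()) // main.tf:3:7

        var invalid token.Pos
        fmt.Println(invalid.String()) // -
    }
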
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
new file mode 100644
index 00000000..e37c0664
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
@@ -0,0 +1,219 @@
+// Package token defines constants representing the lexical tokens for HCL
+// (HashiCorp Configuration Language)
+package token
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+ Type Type
+ Pos Pos
+ Text string
+ JSON bool
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+ // Special tokens
+ ILLEGAL Type = iota
+ EOF
+ COMMENT
+
+ identifier_beg
+ IDENT // literals
+ literal_beg
+ NUMBER // 12345
+ FLOAT // 123.45
+ BOOL // true,false
+ STRING // "abc"
+ HEREDOC // <<FOO\nbar\nFOO
+ literal_end
+ identifier_end
+
+ operator_beg
+ LBRACK // [
+ LBRACE // {
+ COMMA // ,
+ PERIOD // .
+
+ RBRACK // ]
+ RBRACE // }
+
+ ASSIGN // =
+ ADD // +
+ SUB // -
+ operator_end
+)
+
+var tokens = [...]string{
+ ILLEGAL: "ILLEGAL",
+
+ EOF: "EOF",
+ COMMENT: "COMMENT",
+
+ IDENT: "IDENT",
+ NUMBER: "NUMBER",
+ FLOAT: "FLOAT",
+ BOOL: "BOOL",
+ STRING: "STRING",
+
+ LBRACK: "LBRACK",
+ LBRACE: "LBRACE",
+ COMMA: "COMMA",
+ PERIOD: "PERIOD",
+ HEREDOC: "HEREDOC",
+
+ RBRACK: "RBRACK",
+ RBRACE: "RBRACE",
+
+ ASSIGN: "ASSIGN",
+ ADD: "ADD",
+ SUB: "SUB",
+}
+
+ // String returns the string corresponding to the token type t.
+func (t Type) String() string {
+ s := ""
+ if 0 <= t && t < Type(len(tokens)) {
+ s = tokens[t]
+ }
+ if s == "" {
+ s = "token(" + strconv.Itoa(int(t)) + ")"
+ }
+ return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+ // String returns the token's literal text. Note that this is only
+ // applicable for certain token types, such as token.IDENT,
+ // token.STRING, etc.
+func (t Token) String() string {
+ return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// Value returns the properly typed value for this token. The type of
+// the returned interface{} is guaranteed based on the Type field.
+//
+// This can only be called for literal types. If it is called for any other
+// type, this will panic.
+func (t Token) Value() interface{} {
+ switch t.Type {
+ case BOOL:
+ if t.Text == "true" {
+ return true
+ } else if t.Text == "false" {
+ return false
+ }
+
+ panic("unknown bool value: " + t.Text)
+ case FLOAT:
+ v, err := strconv.ParseFloat(t.Text, 64)
+ if err != nil {
+ panic(err)
+ }
+
+ return float64(v)
+ case NUMBER:
+ v, err := strconv.ParseInt(t.Text, 0, 64)
+ if err != nil {
+ panic(err)
+ }
+
+ return int64(v)
+ case IDENT:
+ return t.Text
+ case HEREDOC:
+ return unindentHeredoc(t.Text)
+ case STRING:
+ // Determine the Unquote method to use. If it came from JSON,
+ // then we need to use the built-in unquote since we have to
+ // escape interpolations there.
+ f := hclstrconv.Unquote
+ if t.JSON {
+ f = strconv.Unquote
+ }
+
+ // This case occurs if json null is used
+ if t.Text == "" {
+ return ""
+ }
+
+ v, err := f(t.Text)
+ if err != nil {
+ panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
+ }
+
+ return v
+ default:
+ panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
+ }
+}
+
+ // unindentHeredoc returns the string content of a HEREDOC when it starts with <<,
+ // and the content with the hanging indent removed when it starts with <<- and
+ // the terminating line is at least as indented as the least indented line.
+func unindentHeredoc(heredoc string) string {
+ // We need to find the end of the marker
+ idx := strings.IndexByte(heredoc, '\n')
+ if idx == -1 {
+ panic("heredoc doesn't contain newline")
+ }
+
+ unindent := heredoc[2] == '-'
+
+ // We can optimize if the heredoc isn't marked for indentation
+ if !unindent {
+ return string(heredoc[idx+1 : len(heredoc)-idx+1])
+ }
+
+ // We need to unindent each line based on the indentation level of the marker
+ lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
+ whitespacePrefix := lines[len(lines)-1]
+
+ isIndented := true
+ for _, v := range lines {
+ if strings.HasPrefix(v, whitespacePrefix) {
+ continue
+ }
+
+ isIndented = false
+ break
+ }
+
+ // If all lines are not at least as indented as the terminating mark, return the
+ // heredoc as is, but trim the leading space from the marker on the final line.
+ if !isIndented {
+ return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
+ }
+
+ unindentedLines := make([]string, len(lines))
+ for k, v := range lines {
+ if k == len(lines)-1 {
+ unindentedLines[k] = ""
+ break
+ }
+
+ unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
+ }
+
+ return strings.Join(unindentedLines, "\n")
+}
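
A brief sketch of Value on literal tokens, including a HEREDOC routed through unindentHeredoc above (the token literals are constructed by hand for illustration):

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl/hcl/token"
    )

    func main() {
        n := token.Token{Type: token.NUMBER, Text: "42"}
        fmt.Println(n.Value()) // int64(42)

        b := token.Token{Type: token.BOOL, Text: "true"}
        fmt.Println(b.Value()) // true

        // The <<- form strips the hanging two-space indent, yielding "hello\n".
        h := token.Token{Type: token.HEREDOC, Text: "<<-EOF\n  hello\n  EOF\n"}
        fmt.Printf("%q\n", h.Value())
    }
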
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/flatten.go b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
new file mode 100644
index 00000000..f652d6fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
@@ -0,0 +1,117 @@
+package parser
+
+import "github.com/hashicorp/hcl/hcl/ast"
+
+ // flattenObjects takes an AST node, walks it, and flattens nested objects
+ // into flat, HCL-style object items where possible
+func flattenObjects(node ast.Node) {
+ ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
+ // We only care about lists, because this is what we modify
+ list, ok := n.(*ast.ObjectList)
+ if !ok {
+ return n, true
+ }
+
+ // Rebuild the item list
+ items := make([]*ast.ObjectItem, 0, len(list.Items))
+ frontier := make([]*ast.ObjectItem, len(list.Items))
+ copy(frontier, list.Items)
+ for len(frontier) > 0 {
+ // Pop the current item
+ n := len(frontier)
+ item := frontier[n-1]
+ frontier = frontier[:n-1]
+
+ switch v := item.Val.(type) {
+ case *ast.ObjectType:
+ items, frontier = flattenObjectType(v, item, items, frontier)
+ case *ast.ListType:
+ items, frontier = flattenListType(v, item, items, frontier)
+ default:
+ items = append(items, item)
+ }
+ }
+
+ // Reverse the list since the frontier model runs things backwards
+ for i := len(items)/2 - 1; i >= 0; i-- {
+ opp := len(items) - 1 - i
+ items[i], items[opp] = items[opp], items[i]
+ }
+
+ // Done! Set the original items
+ list.Items = items
+ return n, true
+ })
+}
+
+func flattenListType(
+ ot *ast.ListType,
+ item *ast.ObjectItem,
+ items []*ast.ObjectItem,
+ frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+ // If the list is empty, keep the original list
+ if len(ot.List) == 0 {
+ items = append(items, item)
+ return items, frontier
+ }
+
+ // All the elements of this list must also be objects!
+ for _, subitem := range ot.List {
+ if _, ok := subitem.(*ast.ObjectType); !ok {
+ items = append(items, item)
+ return items, frontier
+ }
+ }
+
+ // Great! We have a match. Go through all the items and flatten
+ for _, elem := range ot.List {
+ // Add it to the frontier so that we can recurse
+ frontier = append(frontier, &ast.ObjectItem{
+ Keys: item.Keys,
+ Assign: item.Assign,
+ Val: elem,
+ LeadComment: item.LeadComment,
+ LineComment: item.LineComment,
+ })
+ }
+
+ return items, frontier
+}
+
+func flattenObjectType(
+ ot *ast.ObjectType,
+ item *ast.ObjectItem,
+ items []*ast.ObjectItem,
+ frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+ // If the list has no items we do not have to flatten anything
+ if ot.List.Items == nil {
+ items = append(items, item)
+ return items, frontier
+ }
+
+ // All the elements of this object must also be objects!
+ for _, subitem := range ot.List.Items {
+ if _, ok := subitem.Val.(*ast.ObjectType); !ok {
+ items = append(items, item)
+ return items, frontier
+ }
+ }
+
+ // Great! We have a match. Go through all the items and flatten
+ for _, subitem := range ot.List.Items {
+ // Copy the new key
+ keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
+ copy(keys, item.Keys)
+ copy(keys[len(item.Keys):], subitem.Keys)
+
+ // Add it to the frontier so that we can recurse
+ frontier = append(frontier, &ast.ObjectItem{
+ Keys: keys,
+ Assign: item.Assign,
+ Val: subitem.Val,
+ LeadComment: item.LeadComment,
+ LineComment: item.LineComment,
+ })
+ }
+
+ return items, frontier
+}
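
A sketch of the effect of this flattening, using the JSON parser introduced later in this diff: nested JSON objects collapse into multi-key items, mirroring HCL's `a b { ... }` block syntax. Import paths are the assumed vendor-tree ones.

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/hcl/hcl/ast"
        "github.com/hashicorp/hcl/json/parser"
    )

    func main() {
        f, err := parser.Parse([]byte(`{"variable": {"image": {"default": "x"}}}`))
        if err != nil {
            log.Fatal(err)
        }

        // Expected to print the flattened key pair: "variable" "image"
        for _, item := range f.Node.(*ast.ObjectList).Items {
            for _, k := range item.Keys {
                fmt.Print(k.Token.Text, " ")
            }
            fmt.Println()
        }
    }
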
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
new file mode 100644
index 00000000..125a5f07
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
@@ -0,0 +1,313 @@
+package parser
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ hcltoken "github.com/hashicorp/hcl/hcl/token"
+ "github.com/hashicorp/hcl/json/scanner"
+ "github.com/hashicorp/hcl/json/token"
+)
+
+type Parser struct {
+ sc *scanner.Scanner
+
+ // Last read token
+ tok token.Token
+ commaPrev token.Token
+
+ enableTrace bool
+ indent int
+ n int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+ return &Parser{
+ sc: scanner.New(src),
+ }
+}
+
+ // Parse parses the given source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+ p := newParser(src)
+ return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+ // Parse parses the given source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+ f := &ast.File{}
+ var err, scerr error
+ p.sc.Error = func(pos token.Pos, msg string) {
+ scerr = fmt.Errorf("%s: %s", pos, msg)
+ }
+
+ // The root must be an object in JSON
+ object, err := p.object()
+ if scerr != nil {
+ return nil, scerr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // We make our final node an object list so it is more HCL compatible
+ f.Node = object.List
+
+ // Flatten it, which finds patterns and turns them into more HCL-like
+ // AST trees.
+ flattenObjects(f.Node)
+
+ return f, nil
+}
+
+func (p *Parser) objectList() (*ast.ObjectList, error) {
+ defer un(trace(p, "ParseObjectList"))
+ node := &ast.ObjectList{}
+
+ for {
+ n, err := p.objectItem()
+ if err == errEofToken {
+ break // we are finished
+ }
+
+ // we don't return a nil node, because we might want to use the already
+ // collected items.
+ if err != nil {
+ return node, err
+ }
+
+ node.Add(n)
+
+ // Check for a followup comma. If it isn't a comma, then we're done
+ if tok := p.scan(); tok.Type != token.COMMA {
+ break
+ }
+ }
+
+ return node, nil
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+ defer un(trace(p, "ParseObjectItem"))
+
+ keys, err := p.objectKey()
+ if err != nil {
+ return nil, err
+ }
+
+ o := &ast.ObjectItem{
+ Keys: keys,
+ }
+
+ switch p.tok.Type {
+ case token.COLON:
+ pos := p.tok.Pos
+ o.Assign = hcltoken.Pos{
+ Filename: pos.Filename,
+ Offset: pos.Offset,
+ Line: pos.Line,
+ Column: pos.Column,
+ }
+
+ o.Val, err = p.objectValue()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return o, nil
+}
+
+ // objectKey parses an object key and returns an ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+ keyCount := 0
+ keys := make([]*ast.ObjectKey, 0)
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.EOF:
+ return nil, errEofToken
+ case token.STRING:
+ keyCount++
+ keys = append(keys, &ast.ObjectKey{
+ Token: p.tok.HCLToken(),
+ })
+ case token.COLON:
+ // If we have a zero keycount it means that we never got
+ // an object key, i.e. `{ :`. This is a syntax error.
+ if keyCount == 0 {
+ return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+ }
+
+ // Done
+ return keys, nil
+ case token.ILLEGAL:
+ return nil, errors.New("illegal")
+ default:
+ return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+ }
+ }
+}
+
+ // objectValue parses any type of object value, such as a number, bool,
+ // string, object or list.
+func (p *Parser) objectValue() (ast.Node, error) {
+ defer un(trace(p, "ParseObjectValue"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
+ return p.literalType()
+ case token.LBRACE:
+ return p.objectType()
+ case token.LBRACK:
+ return p.listType()
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
+}
+
+ // object parses the root document, which in JSON must be an object.
+func (p *Parser) object() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseType"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.LBRACE:
+ return p.objectType()
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
+}
+
+ // objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseObjectType"))
+
+ // we assume that the currently scanned token is an LBRACE
+ o := &ast.ObjectType{}
+
+ l, err := p.objectList()
+
+ // if we hit RBRACE, we are good to go (it means we parsed all Items); if it's
+ // not an RBRACE, it's a syntax error and we just return it.
+ if err != nil && p.tok.Type != token.RBRACE {
+ return nil, err
+ }
+
+ o.List = l
+ return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+ defer un(trace(p, "ParseListType"))
+
+ // we assume that the currently scanned token is an LBRACK
+ l := &ast.ListType{}
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.STRING:
+ node, err := p.literalType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ case token.COMMA:
+ continue
+ case token.LBRACE:
+ node, err := p.objectType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ case token.BOOL:
+ // TODO(arslan) should we support? not supported by HCL yet
+ case token.LBRACK:
+ // TODO(arslan) should we support nested lists? Even though it's
+ // written in README of HCL, it's not a part of the grammar
+ // (not defined in parse.y)
+ case token.RBRACK:
+ // finished
+ return l, nil
+ default:
+ return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
+ }
+
+ }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+ defer un(trace(p, "ParseLiteral"))
+
+ return &ast.LiteralType{
+ Token: p.tok.HCLToken(),
+ }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead.
+func (p *Parser) scan() token.Token {
+ // If we have a token on the buffer, then return it.
+ if p.n != 0 {
+ p.n = 0
+ return p.tok
+ }
+
+ p.tok = p.sc.Scan()
+ return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+ p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+ p.indent--
+ p.printTrace(")")
+}
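
A quick sketch of the error path wired up in Parse above: scanner errors are captured via the Error callback, so malformed JSON comes back as a positioned error instead of a panic.

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl/json/parser"
    )

    func main() {
        // JSON numbers may not have a leading zero, so this should fail with
        // something like "numbers cannot start with 0".
        if _, err := parser.Parse([]byte(`{"a": 01}`)); err != nil {
            fmt.Println("parse failed:", err)
        }
    }
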
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
new file mode 100644
index 00000000..dd5c72bb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
@@ -0,0 +1,451 @@
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hcl/json/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+ buf *bytes.Buffer // Source buffer for advancing and scanning
+ src []byte // Source buffer for immutable access
+
+ // Source Position
+ srcPos token.Pos // current position
+ prevPos token.Pos // previous position, used for peek() method
+
+ lastCharLen int // length of last character in bytes
+ lastLineLen int // length of last line in characters (for correct column reporting)
+
+ tokStart int // token text start position
+ tokEnd int // token text end position
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(pos token.Pos, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+ // tokPos is the start position of the most recently scanned token; set by
+ // Scan. The Filename field is always left untouched by the Scanner. If
+ // an error is reported (via Error) and tokPos is invalid, the scanner is
+ // not inside a token.
+ tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+ // even though we accept a src, we read from an io.Reader compatible type
+ // (*bytes.Buffer). So in the future we might easily change it to a
+ // streaming read.
+ b := bytes.NewBuffer(src)
+ s := &Scanner{
+ buf: b,
+ src: src,
+ }
+
+ // the source position always starts at line 1
+ s.srcPos.Line = 1
+ return s
+}
+
+ // next reads the next rune from the buffered reader. Returns rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+ ch, size, err := s.buf.ReadRune()
+ if err != nil {
+ // advance for error reporting
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ return eof
+ }
+
+ if ch == utf8.RuneError && size == 1 {
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ s.err("illegal UTF-8 encoding")
+ return ch
+ }
+
+ // remember last position
+ s.prevPos = s.srcPos
+
+ s.srcPos.Column++
+ s.lastCharLen = size
+ s.srcPos.Offset += size
+
+ if ch == '\n' {
+ s.srcPos.Line++
+ s.lastLineLen = s.srcPos.Column
+ s.srcPos.Column = 0
+ }
+
+ // debug
+ // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+ return ch
+}
+
+ // unread unreads the previously read rune and updates the source position
+func (s *Scanner) unread() {
+ if err := s.buf.UnreadRune(); err != nil {
+ panic(err) // this is user fault, we should catch it
+ }
+ s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+ peek, _, err := s.buf.ReadRune()
+ if err != nil {
+ return eof
+ }
+
+ s.buf.UnreadRune()
+ return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+ ch := s.next()
+
+ // skip white space
+ for isWhitespace(ch) {
+ ch = s.next()
+ }
+
+ var tok token.Type
+
+ // token text markings
+ s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+ // token position: the initial next() moves the offset by one (the size of the
+ // rune, actually), but we are interested in the starting point
+ s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+ if s.srcPos.Column > 0 {
+ // common case: last character was not a '\n'
+ s.tokPos.Line = s.srcPos.Line
+ s.tokPos.Column = s.srcPos.Column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.tokPos.Line = s.srcPos.Line - 1
+ s.tokPos.Column = s.lastLineLen
+ }
+
+ switch {
+ case isLetter(ch):
+ lit := s.scanIdentifier()
+ if lit == "true" || lit == "false" {
+ tok = token.BOOL
+ } else if lit == "null" {
+ tok = token.NULL
+ } else {
+ s.err("illegal char")
+ }
+ case isDecimal(ch):
+ tok = s.scanNumber(ch)
+ default:
+ switch ch {
+ case eof:
+ tok = token.EOF
+ case '"':
+ tok = token.STRING
+ s.scanString()
+ case '.':
+ tok = token.PERIOD
+ ch = s.peek()
+ if isDecimal(ch) {
+ tok = token.FLOAT
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ tok = token.RBRACE
+ case ',':
+ tok = token.COMMA
+ case ':':
+ tok = token.COLON
+ case '-':
+ if isDecimal(s.peek()) {
+ ch := s.next()
+ tok = s.scanNumber(ch)
+ } else {
+ s.err("illegal char")
+ }
+ default:
+ s.err("illegal char: " + string(ch))
+ }
+ }
+
+ // finish token ending
+ s.tokEnd = s.srcPos.Offset
+
+ // create token literal
+ var tokenText string
+ if s.tokStart >= 0 {
+ tokenText = string(s.src[s.tokStart:s.tokEnd])
+ }
+ s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+ return token.Token{
+ Type: tok,
+ Pos: s.tokPos,
+ Text: tokenText,
+ }
+}
+
+// scanNumber scans a HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+ zero := ch == '0'
+ pos := s.srcPos
+
+ s.scanMantissa(ch)
+ ch = s.next() // seek forward
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+
+ // If the number started with a zero and has more digits, it's an error
+ if zero && pos != s.srcPos {
+ s.err("numbers cannot start with 0")
+ }
+
+ return token.NUMBER
+}
+
+ // scanMantissa scans the mantissa beginning from the rune. It returns the next
+ // non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+ scanned := false
+ for isDecimal(ch) {
+ ch = s.next()
+ scanned = true
+ }
+
+ if scanned && ch != eof {
+ s.unread()
+ }
+ return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.peek() // we peek just to see if we can move forward
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+ braces := 0
+ for {
+ // '"' opening already consumed
+ // read character after quote
+ ch := s.next()
+
+ if ch == '\n' || ch < 0 || ch == eof {
+ s.err("literal not terminated")
+ return
+ }
+
+ if ch == '"' {
+ break
+ }
+
+ // If we're going into a ${} then we can ignore quotes for awhile
+ if braces == 0 && ch == '$' && s.peek() == '{' {
+ braces++
+ s.next()
+ } else if braces > 0 && ch == '{' {
+ braces++
+ }
+ if braces > 0 && ch == '}' {
+ braces--
+ }
+
+ if ch == '\\' {
+ s.scanEscape()
+ }
+ }
+
+ return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+ // http://en.cppreference.com/w/cpp/language/escape
+ ch := s.next() // read character after '/'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+ // nothing to do
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // octal notation
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+ // hexadecimal notation
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.err("illegal char escape")
+ }
+ return ch
+}
+
+ // scanDigits scans a rune with the given base up to n times. For example, an
+ // octal notation \184 would result in a call to scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ n--
+ }
+ if n > 0 {
+ s.err("illegal char escape")
+ }
+
+ // we scanned all digits; put the last non-digit char back
+ s.unread()
+ return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+ offs := s.srcPos.Offset - s.lastCharLen
+ ch := s.next()
+ for isLetter(ch) || isDigit(ch) || ch == '-' {
+ ch = s.next()
+ }
+
+ if ch != eof {
+ s.unread() // we got identifier, put back latest char
+ }
+
+ return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+ pos.Offset = s.srcPos.Offset - s.lastCharLen
+ switch {
+ case s.srcPos.Column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.srcPos.Line
+ pos.Column = s.srcPos.Column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ pos.Line = s.srcPos.Line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+ // err reports a scanning error to the s.Error function. If the function is
+ // not defined, it prints the error to os.Stderr by default
+func (s *Scanner) err(msg string) {
+ s.ErrorCount++
+ pos := s.recentPosition()
+
+ if s.Error != nil {
+ s.Error(pos, msg)
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+ // isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+ // isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+ // isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is an hexadecimal number
+func isHexadecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+ // digitVal returns the integer value of a given octal, decimal or hexadecimal rune
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
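
The same Scan loop as the HCL scanner applies here; a small sketch showing the JSON-specific COLON and NULL tokens:

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl/json/scanner"
        "github.com/hashicorp/hcl/json/token"
    )

    func main() {
        s := scanner.New([]byte(`{"a": null}`))
        for {
            tok := s.Scan()
            if tok.Type == token.EOF {
                break
            }
            // LBRACE, STRING, COLON, NULL, RBRACE in turn.
            fmt.Println(tok.Type, tok.Text)
        }
    }
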
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644
index 00000000..59c1bb72
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+func (p Pos) String() string {
+ s := p.Filename
+ if p.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+ return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+ return u.Offset < p.Offset || u.Line < p.Line
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go
new file mode 100644
index 00000000..95a0c3ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/token.go
@@ -0,0 +1,118 @@
+package token
+
+import (
+ "fmt"
+ "strconv"
+
+ hcltoken "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+ Type Type
+ Pos Pos
+ Text string
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+ // Special tokens
+ ILLEGAL Type = iota
+ EOF
+
+ identifier_beg
+ literal_beg
+ NUMBER // 12345
+ FLOAT // 123.45
+ BOOL // true,false
+ STRING // "abc"
+ NULL // null
+ literal_end
+ identifier_end
+
+ operator_beg
+ LBRACK // [
+ LBRACE // {
+ COMMA // ,
+ PERIOD // .
+ COLON // :
+
+ RBRACK // ]
+ RBRACE // }
+
+ operator_end
+)
+
+var tokens = [...]string{
+ ILLEGAL: "ILLEGAL",
+
+ EOF: "EOF",
+
+ NUMBER: "NUMBER",
+ FLOAT: "FLOAT",
+ BOOL: "BOOL",
+ STRING: "STRING",
+ NULL: "NULL",
+
+ LBRACK: "LBRACK",
+ LBRACE: "LBRACE",
+ COMMA: "COMMA",
+ PERIOD: "PERIOD",
+ COLON: "COLON",
+
+ RBRACK: "RBRACK",
+ RBRACE: "RBRACE",
+}
+
+ // String returns the string corresponding to the token type t.
+func (t Type) String() string {
+ s := ""
+ if 0 <= t && t < Type(len(tokens)) {
+ s = tokens[t]
+ }
+ if s == "" {
+ s = "token(" + strconv.Itoa(int(t)) + ")"
+ }
+ return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+ // String returns the token's literal text. Note that this is only
+ // applicable for certain token types, such as token.IDENT,
+ // token.STRING, etc.
+func (t Token) String() string {
+ return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// HCLToken converts this token to an HCL token.
+//
+// The token type must be a literal type or this will panic.
+func (t Token) HCLToken() hcltoken.Token {
+ switch t.Type {
+ case BOOL:
+ return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
+ case FLOAT:
+ return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
+ case NULL:
+ return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
+ case NUMBER:
+ return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
+ case STRING:
+ return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
+ default:
+ panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
+ }
+}
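
A short sketch of HCLToken: JSON strings carry JSON = true, so Value() on the converted token picks the standard-library unquoter rather than the HCL-aware one.

    package main

    import (
        "fmt"

        jsontoken "github.com/hashicorp/hcl/json/token"
    )

    func main() {
        jt := jsontoken.Token{Type: jsontoken.STRING, Text: `"hello"`}
        ht := jt.HCLToken()
        fmt.Println(ht.Type, ht.JSON, ht.Value()) // STRING true hello
    }
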
diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go
new file mode 100644
index 00000000..d9993c29
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/lex.go
@@ -0,0 +1,38 @@
+package hcl
+
+import (
+ "unicode"
+ "unicode/utf8"
+)
+
+type lexModeValue byte
+
+const (
+ lexModeUnknown lexModeValue = iota
+ lexModeHcl
+ lexModeJson
+)
+
+// lexMode returns whether we're going to be parsing in JSON
+// mode or HCL mode.
+func lexMode(v []byte) lexModeValue {
+ var (
+ r rune
+ w int
+ offset int
+ )
+
+ for {
+ r, w = utf8.DecodeRune(v[offset:])
+ offset += w
+ if unicode.IsSpace(r) {
+ continue
+ }
+ if r == '{' {
+ return lexModeJson
+ }
+ break
+ }
+
+ return lexModeHcl
+}
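
Since lexMode is unexported, a sketch of its behavior has to live inside package hcl (for example in a test file); the first non-space rune decides the mode:

    package hcl

    import "fmt"

    func ExampleLexMode() {
        fmt.Println(lexMode([]byte(`  {"a": 1}`)) == lexModeJson) // true
        fmt.Println(lexMode([]byte(`a = 1`)) == lexModeHcl)       // true
    }
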
diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go
new file mode 100644
index 00000000..1fca53c4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/parse.go
@@ -0,0 +1,39 @@
+package hcl
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ hclParser "github.com/hashicorp/hcl/hcl/parser"
+ jsonParser "github.com/hashicorp/hcl/json/parser"
+)
+
+ // ParseBytes accepts a byte slice as input and returns the AST tree.
+//
+// Input can be either JSON or HCL
+func ParseBytes(in []byte) (*ast.File, error) {
+ return parse(in)
+}
+
+ // ParseString accepts a string as input and returns the AST tree.
+func ParseString(input string) (*ast.File, error) {
+ return parse([]byte(input))
+}
+
+func parse(in []byte) (*ast.File, error) {
+ switch lexMode(in) {
+ case lexModeHcl:
+ return hclParser.Parse(in)
+ case lexModeJson:
+ return jsonParser.Parse(in)
+ }
+
+ return nil, fmt.Errorf("unknown config format")
+}
+
+// Parse parses the given input and returns the root object.
+//
+// The input format can be either HCL or JSON.
+func Parse(input string) (*ast.File, error) {
+ return parse([]byte(input))
+}
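
Tying the two front ends together, a closing sketch of the public entry point: Parse auto-detects HCL versus JSON via lexMode and dispatches to the matching parser.

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/hcl"
    )

    func main() {
        for _, src := range []string{`a = 1`, `{"a": 1}`} {
            f, err := hcl.Parse(src)
            if err != nil {
                log.Fatal(err)
            }
            // Both inputs parse to an *ast.ObjectList root.
            fmt.Printf("parsed %T from %q\n", f.Node, src)
        }
    }
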
diff --git a/vendor/github.com/hashicorp/hil/LICENSE b/vendor/github.com/hashicorp/hil/LICENSE
new file mode 100644
index 00000000..82b4de97
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/LICENSE
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/hil/README.md b/vendor/github.com/hashicorp/hil/README.md
new file mode 100644
index 00000000..186ed251
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/README.md
@@ -0,0 +1,102 @@
+# HIL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hil?status.png)](https://godoc.org/github.com/hashicorp/hil) [![Build Status](https://travis-ci.org/hashicorp/hil.svg?branch=master)](https://travis-ci.org/hashicorp/hil)
+
+HIL (HashiCorp Interpolation Language) is a lightweight embedded language used
+primarily for configuration interpolation. The goal of HIL is to make a simple
+language for interpolations in the various configurations of HashiCorp tools.
+
+HIL is built to interpolate any string, but is in use by HashiCorp primarily
+with [HCL](https://github.com/hashicorp/hcl). HCL is _not required_ in any
+way for use with HIL.
+
+HIL isn't meant to be a general-purpose language. It was built for basic
+configuration interpolations. Therefore, you can't currently write functions,
+have conditionals, set intermediary variables, etc. within HIL itself. It is
+possible some of these may be added later, but only if the right use case exists.
+
+## Why?
+
+Many of our tools have support for something similar to templates, but
+within the configuration itself. The most prominent requirement was in
+[Terraform](https://github.com/hashicorp/terraform) where we wanted the
+configuration to be able to reference values from elsewhere in the
+configuration. Example:
+
+ foo = "hi ${var.world}"
+
+We originally used a full templating language for this, but found it
+was too heavyweight. Additionally, many full languages required bindings
+to C (and thus the use of cgo), which we try to avoid to make
+cross-compilation easier. We then moved to very basic regular-expression-based
+string replacement, but the need for basic arithmetic and function calls
+resulted in overly complex regular expressions.
+
+Ultimately, we wrote our own mini-language within Terraform itself. As
+we built other projects such as [Nomad](https://nomadproject.io) and
+[Otto](https://ottoproject.io), the need for basic interpolations arose
+again.
+
+Thus HIL was born. It is extracted from Terraform, cleaned up, and
+better tested for general purpose use.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar follows.
+
+Code begins within `${` and `}`. Outside of this, text is treated
+literally. For example, `foo` is a valid HIL program that is just the
+string "foo", but `foo ${bar}` is an HIL program that is the string "foo "
+concatenated with the value of `bar`. For the remainder of the syntax
+docs, we'll assume you're within `${}`.
+
+ * Identifiers are any text in the format of `[a-zA-Z0-9-.]`. Example
+ identifiers: `foo`, `var.foo`, `foo-bar`.
+
+ * Strings are double quoted and can contain any UTF-8 characters.
+ Example: `"Hello, World"`
+
+ * Numbers are assumed to be base 10. If you prefix a number with 0x,
+    it is treated as hexadecimal. If it is prefixed with 0, it is
+    treated as octal. Numbers can be in scientific notation: "1e10".
+
+ * Unary `-` can be used for negative numbers. Example: `-10` or `-0.2`
+
+ * Boolean values: `true`, `false`
+
+ * The following arithmetic operations are allowed: +, -, *, /, %.
+
+ * Function calls are in the form of `name(arg1, arg2, ...)`. Example:
+ `add(1, 5)`. Arguments can be any valid HIL expression, example:
+ `add(1, var.foo)` or even nested function calls:
+ `add(1, get("some value"))`.
+
+ * Within strings, further interpolations can be opened with `${}`.
+ Example: `"Hello ${nested}"`. A full example including the
+    original `${}` (remember, this list assumes we're inside of one
+ already) could be: `foo ${func("hello ${var.foo}")}`.
+
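+A minimal end-to-end sketch in Go, using the package's top-level
+`hil.Parse` and `hil.Eval` entry points together with the `ast` scope
+types, looks roughly like this (the exact `EvalConfig` shape may differ
+between versions):
+
+    tree, err := hil.Parse("hi ${var.world}")
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    config := &hil.EvalConfig{
+        GlobalScope: &ast.BasicScope{
+            VarMap: map[string]ast.Variable{
+                "var.world": ast.Variable{
+                    Type:  ast.TypeString,
+                    Value: "world",
+                },
+            },
+        },
+    }
+
+    result, err := hil.Eval(tree, config)
+    // on success, result.Value is the string "hi world"
+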
+## Language Changes
+
+We've used this mini-language in Terraform for years. For backwards compatibility
+reasons, we're unlikely to make an incompatible change to the language but
+we're not currently making that promise, either.
+
+The internal API of this project may very well change as we evolve it
+to work with more of our projects. We recommend using some sort of dependency
+management solution with this package.
+
+## Future Changes
+
+The following changes are already planned to be made at some point:
+
+ * Richer types: lists, maps, etc.
+
+ * Convert to a more standard Go parser structure similar to HCL. This
+ will improve our error messaging as well as allow us to have automatic
+ formatting.
+
+ * Allow interpolations to result in more types than just a string. While
+ within the interpolation basic types are honored, the result is always
+ a string.
diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic.go b/vendor/github.com/hashicorp/hil/ast/arithmetic.go
new file mode 100644
index 00000000..94dc24f8
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/arithmetic.go
@@ -0,0 +1,43 @@
+package ast
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Arithmetic represents a node where the result is arithmetic of
+// two or more operands in the order given.
+type Arithmetic struct {
+ Op ArithmeticOp
+ Exprs []Node
+ Posx Pos
+}
+
+func (n *Arithmetic) Accept(v Visitor) Node {
+ for i, expr := range n.Exprs {
+ n.Exprs[i] = expr.Accept(v)
+ }
+
+ return v(n)
+}
+
+func (n *Arithmetic) Pos() Pos {
+ return n.Posx
+}
+
+func (n *Arithmetic) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
+
+func (n *Arithmetic) String() string {
+ var b bytes.Buffer
+ for _, expr := range n.Exprs {
+ b.WriteString(fmt.Sprintf("%s", expr))
+ }
+
+ return b.String()
+}
+
+func (n *Arithmetic) Type(Scope) (Type, error) {
+ return TypeInt, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go
new file mode 100644
index 00000000..18880c60
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go
@@ -0,0 +1,24 @@
+package ast
+
+// ArithmeticOp is the operation to use for the math.
+type ArithmeticOp int
+
+const (
+ ArithmeticOpInvalid ArithmeticOp = 0
+
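+	// Note: iota is already 1 at the next spec, so the operators below
+	// count up from ArithmeticOpAdd == 1 through
+	// ArithmeticOpGreaterThanOrEqual == 13.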
+ ArithmeticOpAdd ArithmeticOp = iota
+ ArithmeticOpSub
+ ArithmeticOpMul
+ ArithmeticOpDiv
+ ArithmeticOpMod
+
+ ArithmeticOpLogicalAnd
+ ArithmeticOpLogicalOr
+
+ ArithmeticOpEqual
+ ArithmeticOpNotEqual
+ ArithmeticOpLessThan
+ ArithmeticOpLessThanOrEqual
+ ArithmeticOpGreaterThan
+ ArithmeticOpGreaterThanOrEqual
+)
diff --git a/vendor/github.com/hashicorp/hil/ast/ast.go b/vendor/github.com/hashicorp/hil/ast/ast.go
new file mode 100644
index 00000000..c6350f8b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/ast.go
@@ -0,0 +1,99 @@
+package ast
+
+import (
+ "fmt"
+)
+
+// Node is the interface that all AST nodes must implement.
+type Node interface {
+ // Accept is called to dispatch to the visitors. It must return the
+ // resulting Node (which might be different in an AST transform).
+ Accept(Visitor) Node
+
+ // Pos returns the position of this node in some source.
+ Pos() Pos
+
+ // Type returns the type of this node for the given context.
+ Type(Scope) (Type, error)
+}
+
+// Pos is the starting position of an AST node
+type Pos struct {
+ Column, Line int // Column/Line number, starting at 1
+ Filename string // Optional source filename, if known
+}
+
+func (p Pos) String() string {
+ if p.Filename == "" {
+ return fmt.Sprintf("%d:%d", p.Line, p.Column)
+ } else {
+ return fmt.Sprintf("%s:%d:%d", p.Filename, p.Line, p.Column)
+ }
+}
+
+// InitPos is an initial position value. This should be used as
+// the starting position (presets the column and line to 1).
+var InitPos = Pos{Column: 1, Line: 1}
+
+// Visitors are just implementations of this function.
+//
+// The function must return the Node to replace this node with. "nil" is
+// _not_ a valid return value. If there is no replacement, the original node
+// should be returned. We build this replacement directly into the visitor
+// pattern since AST transformations are a common and useful tool and
+// building it into the AST itself makes it required for future Node
+// implementations and very easy to do.
+//
+// Note that this isn't a true implementation of the visitor pattern, which
+// generally requires proper type dispatch on the function. However,
+// implementing this basic visitor pattern style is still very useful even
+// if you have to type switch.
+type Visitor func(Node) Node
+
+//go:generate stringer -type=Type
+
+// Type is the type of any value.
+type Type uint32
+
+const (
+ TypeInvalid Type = 0
+ TypeAny Type = 1 << iota
+ TypeBool
+ TypeString
+ TypeInt
+ TypeFloat
+ TypeList
+ TypeMap
+
+ // This is a special type used by Terraform to mark "unknown" values.
+ // It is impossible for this type to be introduced into your HIL programs
+ // unless you explicitly set a variable to this value. In that case,
+	// any operation involving the variable will return "TypeUnknown" as the
+ // type.
+ TypeUnknown
+)
+
+func (t Type) Printable() string {
+ switch t {
+ case TypeInvalid:
+ return "invalid type"
+ case TypeAny:
+ return "any type"
+ case TypeBool:
+ return "type bool"
+ case TypeString:
+ return "type string"
+ case TypeInt:
+ return "type int"
+ case TypeFloat:
+ return "type float"
+ case TypeList:
+ return "type list"
+ case TypeMap:
+ return "type map"
+ case TypeUnknown:
+ return "type unknown"
+ default:
+ return "unknown type"
+ }
+}
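+
+// An editorial sketch (not part of the upstream file): a Visitor that
+// upper-cases every string literal in a tree could be written as
+//
+//	v := func(n Node) Node {
+//		if lit, ok := n.(*LiteralNode); ok && lit.Typex == TypeString {
+//			lit.Value = strings.ToUpper(lit.Value.(string))
+//		}
+//		return n // never return nil; return the node itself when not replacing
+//	}
+//	root = root.Accept(v)
+//
+// LiteralNode and its Typex field are defined in literal.go.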
diff --git a/vendor/github.com/hashicorp/hil/ast/call.go b/vendor/github.com/hashicorp/hil/ast/call.go
new file mode 100644
index 00000000..05570110
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/call.go
@@ -0,0 +1,47 @@
+package ast
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Call represents a function call.
+type Call struct {
+ Func string
+ Args []Node
+ Posx Pos
+}
+
+func (n *Call) Accept(v Visitor) Node {
+ for i, a := range n.Args {
+ n.Args[i] = a.Accept(v)
+ }
+
+ return v(n)
+}
+
+func (n *Call) Pos() Pos {
+ return n.Posx
+}
+
+func (n *Call) String() string {
+ args := make([]string, len(n.Args))
+ for i, arg := range n.Args {
+ args[i] = fmt.Sprintf("%s", arg)
+ }
+
+ return fmt.Sprintf("Call(%s, %s)", n.Func, strings.Join(args, ", "))
+}
+
+func (n *Call) Type(s Scope) (Type, error) {
+ f, ok := s.LookupFunc(n.Func)
+ if !ok {
+ return TypeInvalid, fmt.Errorf("unknown function: %s", n.Func)
+ }
+
+ return f.ReturnType, nil
+}
+
+func (n *Call) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/conditional.go b/vendor/github.com/hashicorp/hil/ast/conditional.go
new file mode 100644
index 00000000..be48f89d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/conditional.go
@@ -0,0 +1,36 @@
+package ast
+
+import (
+ "fmt"
+)
+
+type Conditional struct {
+ CondExpr Node
+ TrueExpr Node
+ FalseExpr Node
+ Posx Pos
+}
+
+// Accept passes the given visitor to the child nodes in this order:
+// CondExpr, TrueExpr, FalseExpr. It then finally passes itself to the visitor.
+func (n *Conditional) Accept(v Visitor) Node {
+ n.CondExpr = n.CondExpr.Accept(v)
+ n.TrueExpr = n.TrueExpr.Accept(v)
+ n.FalseExpr = n.FalseExpr.Accept(v)
+
+ return v(n)
+}
+
+func (n *Conditional) Pos() Pos {
+ return n.Posx
+}
+
+func (n *Conditional) Type(Scope) (Type, error) {
+ // This is not actually a useful value; the type checker ignores
+ // this function when analyzing conditionals, just as with Arithmetic.
+ return TypeInt, nil
+}
+
+func (n *Conditional) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/index.go b/vendor/github.com/hashicorp/hil/ast/index.go
new file mode 100644
index 00000000..860c25fd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/index.go
@@ -0,0 +1,76 @@
+package ast
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Index represents an indexing operation into another data structure
+type Index struct {
+ Target Node
+ Key Node
+ Posx Pos
+}
+
+func (n *Index) Accept(v Visitor) Node {
+ n.Target = n.Target.Accept(v)
+ n.Key = n.Key.Accept(v)
+ return v(n)
+}
+
+func (n *Index) Pos() Pos {
+ return n.Posx
+}
+
+func (n *Index) String() string {
+ return fmt.Sprintf("Index(%s, %s)", n.Target, n.Key)
+}
+
+func (n *Index) Type(s Scope) (Type, error) {
+ variableAccess, ok := n.Target.(*VariableAccess)
+ if !ok {
+ return TypeInvalid, fmt.Errorf("target is not a variable")
+ }
+
+ variable, ok := s.LookupVar(variableAccess.Name)
+ if !ok {
+ return TypeInvalid, fmt.Errorf("unknown variable accessed: %s", variableAccess.Name)
+ }
+
+ switch variable.Type {
+ case TypeList:
+ return n.typeList(variable, variableAccess.Name)
+ case TypeMap:
+ return n.typeMap(variable, variableAccess.Name)
+ default:
+ return TypeInvalid, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type)
+ }
+}
+
+func (n *Index) typeList(variable Variable, variableName string) (Type, error) {
+ // We assume type checking has already determined that this is a list
+ list := variable.Value.([]Variable)
+
+ return VariableListElementTypesAreHomogenous(variableName, list)
+}
+
+func (n *Index) typeMap(variable Variable, variableName string) (Type, error) {
+ // We assume type checking has already determined that this is a map
+ vmap := variable.Value.(map[string]Variable)
+
+ return VariableMapValueTypesAreHomogenous(variableName, vmap)
+}
+
+func reportTypes(typesFound map[Type]struct{}) string {
+ stringTypes := make([]string, len(typesFound))
+ i := 0
+	for k := range typesFound {
+		stringTypes[i] = k.String()
+		i++
+ }
+ return strings.Join(stringTypes, ", ")
+}
+
+func (n *Index) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/literal.go b/vendor/github.com/hashicorp/hil/ast/literal.go
new file mode 100644
index 00000000..da6014fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/literal.go
@@ -0,0 +1,88 @@
+package ast
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// LiteralNode represents a single literal value, such as "foo" or
+// 42 or 3.14159. Based on the Type, the Value can be safely cast.
+type LiteralNode struct {
+ Value interface{}
+ Typex Type
+ Posx Pos
+}
+
+// NewLiteralNode returns a new literal node representing the given
+// literal Go value, which must correspond to one of the primitive types
+// supported by HIL. Lists and maps cannot currently be constructed via
+// this function.
+//
+// If an inappropriately-typed value is provided, this function will
+// return an error. The main intended use of this function is to produce
+// "synthetic" literals from constants in code, where the value type is
+// well known at compile time. To easily store these in global variables,
+// see also MustNewLiteralNode.
+func NewLiteralNode(value interface{}, pos Pos) (*LiteralNode, error) {
+ goType := reflect.TypeOf(value)
+ var hilType Type
+
+ switch goType.Kind() {
+ case reflect.Bool:
+ hilType = TypeBool
+ case reflect.Int:
+ hilType = TypeInt
+ case reflect.Float64:
+ hilType = TypeFloat
+ case reflect.String:
+ hilType = TypeString
+ default:
+ return nil, fmt.Errorf("unsupported literal node type: %T", value)
+ }
+
+ return &LiteralNode{
+ Value: value,
+ Typex: hilType,
+ Posx: pos,
+ }, nil
+}
+
+// MustNewLiteralNode wraps NewLiteralNode and panics if an error is
+// returned, thus allowing valid literal nodes to be easily assigned to
+// global variables.
+func MustNewLiteralNode(value interface{}, pos Pos) *LiteralNode {
+ node, err := NewLiteralNode(value, pos)
+ if err != nil {
+ panic(err)
+ }
+ return node
+}
+
+func (n *LiteralNode) Accept(v Visitor) Node {
+ return v(n)
+}
+
+func (n *LiteralNode) Pos() Pos {
+ return n.Posx
+}
+
+func (n *LiteralNode) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
+
+func (n *LiteralNode) String() string {
+ return fmt.Sprintf("Literal(%s, %v)", n.Typex, n.Value)
+}
+
+func (n *LiteralNode) Type(Scope) (Type, error) {
+ return n.Typex, nil
+}
+
+// IsUnknown returns true either if the node's value is itself unknown
+// or if it is a collection containing any unknown elements, deeply.
+func (n *LiteralNode) IsUnknown() bool {
+ return IsUnknown(Variable{
+ Type: n.Typex,
+ Value: n.Value,
+ })
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/output.go b/vendor/github.com/hashicorp/hil/ast/output.go
new file mode 100644
index 00000000..1e27f970
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/output.go
@@ -0,0 +1,78 @@
+package ast
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Output represents the root node of all interpolation evaluations. If the
+// output only has one expression which is either a TypeList or TypeMap, the
+// result of evaluation can be type-asserted to []interface{} or
+// map[string]interface{} respectively. Otherwise the Output evaluates as a
+// string by concatenating the evaluation of each expression.
+type Output struct {
+ Exprs []Node
+ Posx Pos
+}
+
+func (n *Output) Accept(v Visitor) Node {
+ for i, expr := range n.Exprs {
+ n.Exprs[i] = expr.Accept(v)
+ }
+
+ return v(n)
+}
+
+func (n *Output) Pos() Pos {
+ return n.Posx
+}
+
+func (n *Output) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
+
+func (n *Output) String() string {
+ var b bytes.Buffer
+ for _, expr := range n.Exprs {
+ b.WriteString(fmt.Sprintf("%s", expr))
+ }
+
+ return b.String()
+}
+
+func (n *Output) Type(s Scope) (Type, error) {
+ // Special case no expressions for backward compatibility
+ if len(n.Exprs) == 0 {
+ return TypeString, nil
+ }
+
+ // Special case a single expression of types list or map
+ if len(n.Exprs) == 1 {
+ exprType, err := n.Exprs[0].Type(s)
+ if err != nil {
+ return TypeInvalid, err
+ }
+ switch exprType {
+ case TypeList:
+ return TypeList, nil
+ case TypeMap:
+ return TypeMap, nil
+ }
+ }
+
+ // Otherwise ensure all our expressions are strings
+ for index, expr := range n.Exprs {
+ exprType, err := expr.Type(s)
+ if err != nil {
+ return TypeInvalid, err
+ }
+ // We only look for things we know we can't coerce with an implicit conversion func
+ if exprType == TypeList || exprType == TypeMap {
+ return TypeInvalid, fmt.Errorf(
+ "multi-expression HIL outputs may only have string inputs: %d is type %s",
+ index, exprType)
+ }
+ }
+
+ return TypeString, nil
+}
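+
+// Editorial examples of the rules above (not part of the upstream file):
+// an Output with a single TypeList expression types as TypeList, while an
+// Output mixing a string expression with a list expression is rejected,
+// since multi-expression outputs must be built only from strings.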
diff --git a/vendor/github.com/hashicorp/hil/ast/scope.go b/vendor/github.com/hashicorp/hil/ast/scope.go
new file mode 100644
index 00000000..7a975d99
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/scope.go
@@ -0,0 +1,90 @@
+package ast
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Scope is the interface used to look up variables and functions while
+// evaluating. How these functions/variables are defined are up to the caller.
+type Scope interface {
+ LookupFunc(string) (Function, bool)
+ LookupVar(string) (Variable, bool)
+}
+
+// Variable is a variable value for execution given as input to the engine.
+// It records the value of a variable along with its type.
+type Variable struct {
+ Value interface{}
+ Type Type
+}
+
+// NewVariable creates a new Variable for the given value. This will
+// attempt to infer the correct type. If it can't, an error will be returned.
+func NewVariable(v interface{}) (result Variable, err error) {
+ switch v := reflect.ValueOf(v); v.Kind() {
+ case reflect.String:
+ result.Type = TypeString
+ default:
+ err = fmt.Errorf("Unknown type: %s", v.Kind())
+ }
+
+ result.Value = v
+ return
+}
+
+// String implements Stringer on Variable, displaying the type and value
+// of the Variable.
+func (v Variable) String() string {
+ return fmt.Sprintf("{Variable (%s): %+v}", v.Type, v.Value)
+}
+
+// Function defines a function that can be executed by the engine.
+// The type checker will validate that the proper types will be called
+// to the callback.
+type Function struct {
+ // ArgTypes is the list of types in argument order. These are the
+ // required arguments.
+ //
+ // ReturnType is the type of the returned value. The Callback MUST
+ // return this type.
+ ArgTypes []Type
+ ReturnType Type
+
+ // Variadic, if true, says that this function is variadic, meaning
+ // it takes a variable number of arguments. In this case, the
+ // VariadicType must be set.
+ Variadic bool
+ VariadicType Type
+
+	// Callback is the function invoked when this Function is called. The
+	// argument types are guaranteed by the type checker to match the spec
+	// above. The length of args is exactly len(ArgTypes) unless Variadic
+	// is true, in which case it is >= len(ArgTypes).
+ Callback func([]interface{}) (interface{}, error)
+}
+
+// BasicScope is a simple scope that looks up variables and functions
+// using a map.
+type BasicScope struct {
+ FuncMap map[string]Function
+ VarMap map[string]Variable
+}
+
+func (s *BasicScope) LookupFunc(n string) (Function, bool) {
+ if s == nil {
+ return Function{}, false
+ }
+
+ v, ok := s.FuncMap[n]
+ return v, ok
+}
+
+func (s *BasicScope) LookupVar(n string) (Variable, bool) {
+ if s == nil {
+ return Variable{}, false
+ }
+
+ v, ok := s.VarMap[n]
+ return v, ok
+}
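+
+// An editorial sketch (not part of the upstream file): a scope exposing
+// one variable and one variadic function could be built as
+//
+//	scope := &BasicScope{
+//		VarMap: map[string]Variable{
+//			"var.name": {Type: TypeString, Value: "world"},
+//		},
+//		FuncMap: map[string]Function{
+//			"concat": {
+//				ArgTypes:     []Type{TypeString},
+//				ReturnType:   TypeString,
+//				Variadic:     true,
+//				VariadicType: TypeString,
+//				Callback: func(args []interface{}) (interface{}, error) {
+//					s := ""
+//					for _, a := range args {
+//						s += a.(string)
+//					}
+//					return s, nil
+//				},
+//			},
+//		},
+//	}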
diff --git a/vendor/github.com/hashicorp/hil/ast/stack.go b/vendor/github.com/hashicorp/hil/ast/stack.go
new file mode 100644
index 00000000..bd2bc157
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/stack.go
@@ -0,0 +1,25 @@
+package ast
+
+// Stack is a stack of Node.
+type Stack struct {
+ stack []Node
+}
+
+func (s *Stack) Len() int {
+ return len(s.stack)
+}
+
+func (s *Stack) Push(n Node) {
+ s.stack = append(s.stack, n)
+}
+
+func (s *Stack) Pop() Node {
+ x := s.stack[len(s.stack)-1]
+ s.stack[len(s.stack)-1] = nil
+ s.stack = s.stack[:len(s.stack)-1]
+ return x
+}
+
+func (s *Stack) Reset() {
+ s.stack = nil
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/type_string.go b/vendor/github.com/hashicorp/hil/ast/type_string.go
new file mode 100644
index 00000000..1f51a98d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/type_string.go
@@ -0,0 +1,54 @@
+// Code generated by "stringer -type=Type"; DO NOT EDIT
+
+package ast
+
+import "fmt"
+
+const (
+ _Type_name_0 = "TypeInvalid"
+ _Type_name_1 = "TypeAny"
+ _Type_name_2 = "TypeBool"
+ _Type_name_3 = "TypeString"
+ _Type_name_4 = "TypeInt"
+ _Type_name_5 = "TypeFloat"
+ _Type_name_6 = "TypeList"
+ _Type_name_7 = "TypeMap"
+ _Type_name_8 = "TypeUnknown"
+)
+
+var (
+ _Type_index_0 = [...]uint8{0, 11}
+ _Type_index_1 = [...]uint8{0, 7}
+ _Type_index_2 = [...]uint8{0, 8}
+ _Type_index_3 = [...]uint8{0, 10}
+ _Type_index_4 = [...]uint8{0, 7}
+ _Type_index_5 = [...]uint8{0, 9}
+ _Type_index_6 = [...]uint8{0, 8}
+ _Type_index_7 = [...]uint8{0, 7}
+ _Type_index_8 = [...]uint8{0, 11}
+)
+
+func (i Type) String() string {
+ switch {
+ case i == 0:
+ return _Type_name_0
+ case i == 2:
+ return _Type_name_1
+ case i == 4:
+ return _Type_name_2
+ case i == 8:
+ return _Type_name_3
+ case i == 16:
+ return _Type_name_4
+ case i == 32:
+ return _Type_name_5
+ case i == 64:
+ return _Type_name_6
+ case i == 128:
+ return _Type_name_7
+ case i == 256:
+ return _Type_name_8
+ default:
+ return fmt.Sprintf("Type(%d)", i)
+ }
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/unknown.go b/vendor/github.com/hashicorp/hil/ast/unknown.go
new file mode 100644
index 00000000..d6ddaecc
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/unknown.go
@@ -0,0 +1,30 @@
+package ast
+
+// IsUnknown reports whether a variable is unknown or contains any value
+// that is unknown. This will recurse into lists and maps and so on.
+func IsUnknown(v Variable) bool {
+ // If it is unknown itself, return true
+ if v.Type == TypeUnknown {
+ return true
+ }
+
+ // If it is a container type, check the values
+ switch v.Type {
+ case TypeList:
+ for _, el := range v.Value.([]Variable) {
+ if IsUnknown(el) {
+ return true
+ }
+ }
+ case TypeMap:
+ for _, el := range v.Value.(map[string]Variable) {
+ if IsUnknown(el) {
+ return true
+ }
+ }
+	}
+
+	// Not a container type, or it survived the above checks.
+ return false
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/variable_access.go b/vendor/github.com/hashicorp/hil/ast/variable_access.go
new file mode 100644
index 00000000..4c1362d7
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/variable_access.go
@@ -0,0 +1,36 @@
+package ast
+
+import (
+ "fmt"
+)
+
+// VariableAccess represents a variable access.
+type VariableAccess struct {
+ Name string
+ Posx Pos
+}
+
+func (n *VariableAccess) Accept(v Visitor) Node {
+ return v(n)
+}
+
+func (n *VariableAccess) Pos() Pos {
+ return n.Posx
+}
+
+func (n *VariableAccess) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
+
+func (n *VariableAccess) String() string {
+ return fmt.Sprintf("Variable(%s)", n.Name)
+}
+
+func (n *VariableAccess) Type(s Scope) (Type, error) {
+ v, ok := s.LookupVar(n.Name)
+ if !ok {
+ return TypeInvalid, fmt.Errorf("unknown variable: %s", n.Name)
+ }
+
+ return v.Type, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/variables_helper.go b/vendor/github.com/hashicorp/hil/ast/variables_helper.go
new file mode 100644
index 00000000..06bd18de
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/variables_helper.go
@@ -0,0 +1,63 @@
+package ast
+
+import "fmt"
+
+func VariableListElementTypesAreHomogenous(variableName string, list []Variable) (Type, error) {
+ if len(list) == 0 {
+ return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName)
+ }
+
+ elemType := TypeUnknown
+ for _, v := range list {
+ if v.Type == TypeUnknown {
+ continue
+ }
+
+ if elemType == TypeUnknown {
+ elemType = v.Type
+ continue
+ }
+
+ if v.Type != elemType {
+ return TypeInvalid, fmt.Errorf(
+ "list %q does not have homogenous types. found %s and then %s",
+ variableName,
+ elemType, v.Type,
+ )
+ }
+
+ elemType = v.Type
+ }
+
+ return elemType, nil
+}
+
+func VariableMapValueTypesAreHomogenous(variableName string, vmap map[string]Variable) (Type, error) {
+ if len(vmap) == 0 {
+ return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName)
+ }
+
+ elemType := TypeUnknown
+ for _, v := range vmap {
+ if v.Type == TypeUnknown {
+ continue
+ }
+
+ if elemType == TypeUnknown {
+ elemType = v.Type
+ continue
+ }
+
+ if v.Type != elemType {
+ return TypeInvalid, fmt.Errorf(
+ "map %q does not have homogenous types. found %s and then %s",
+ variableName,
+ elemType, v.Type,
+ )
+ }
+
+ elemType = v.Type
+ }
+
+ return elemType, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/builtins.go b/vendor/github.com/hashicorp/hil/builtins.go
new file mode 100644
index 00000000..909c788a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/builtins.go
@@ -0,0 +1,331 @@
+package hil
+
+import (
+ "errors"
+ "strconv"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+// NOTE: All builtins are tested in engine_test.go
+
+func registerBuiltins(scope *ast.BasicScope) *ast.BasicScope {
+ if scope == nil {
+ scope = new(ast.BasicScope)
+ }
+ if scope.FuncMap == nil {
+ scope.FuncMap = make(map[string]ast.Function)
+ }
+
+ // Implicit conversions
+ scope.FuncMap["__builtin_BoolToString"] = builtinBoolToString()
+ scope.FuncMap["__builtin_FloatToInt"] = builtinFloatToInt()
+ scope.FuncMap["__builtin_FloatToString"] = builtinFloatToString()
+ scope.FuncMap["__builtin_IntToFloat"] = builtinIntToFloat()
+ scope.FuncMap["__builtin_IntToString"] = builtinIntToString()
+ scope.FuncMap["__builtin_StringToInt"] = builtinStringToInt()
+ scope.FuncMap["__builtin_StringToFloat"] = builtinStringToFloat()
+ scope.FuncMap["__builtin_StringToBool"] = builtinStringToBool()
+
+ // Math operations
+ scope.FuncMap["__builtin_IntMath"] = builtinIntMath()
+ scope.FuncMap["__builtin_FloatMath"] = builtinFloatMath()
+ scope.FuncMap["__builtin_BoolCompare"] = builtinBoolCompare()
+ scope.FuncMap["__builtin_FloatCompare"] = builtinFloatCompare()
+ scope.FuncMap["__builtin_IntCompare"] = builtinIntCompare()
+ scope.FuncMap["__builtin_StringCompare"] = builtinStringCompare()
+ scope.FuncMap["__builtin_Logical"] = builtinLogical()
+ return scope
+}
+
+func builtinFloatMath() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt},
+ Variadic: true,
+ VariadicType: ast.TypeFloat,
+ ReturnType: ast.TypeFloat,
+ Callback: func(args []interface{}) (interface{}, error) {
+ op := args[0].(ast.ArithmeticOp)
+ result := args[1].(float64)
+ for _, raw := range args[2:] {
+ arg := raw.(float64)
+ switch op {
+ case ast.ArithmeticOpAdd:
+ result += arg
+ case ast.ArithmeticOpSub:
+ result -= arg
+ case ast.ArithmeticOpMul:
+ result *= arg
+ case ast.ArithmeticOpDiv:
+ result /= arg
+ }
+ }
+
+ return result, nil
+ },
+ }
+}
+
+func builtinIntMath() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt},
+ Variadic: true,
+ VariadicType: ast.TypeInt,
+ ReturnType: ast.TypeInt,
+ Callback: func(args []interface{}) (interface{}, error) {
+ op := args[0].(ast.ArithmeticOp)
+ result := args[1].(int)
+ for _, raw := range args[2:] {
+ arg := raw.(int)
+ switch op {
+ case ast.ArithmeticOpAdd:
+ result += arg
+ case ast.ArithmeticOpSub:
+ result -= arg
+ case ast.ArithmeticOpMul:
+ result *= arg
+ case ast.ArithmeticOpDiv:
+ if arg == 0 {
+ return nil, errors.New("divide by zero")
+ }
+
+ result /= arg
+ case ast.ArithmeticOpMod:
+ if arg == 0 {
+ return nil, errors.New("divide by zero")
+ }
+
+ result = result % arg
+ }
+ }
+
+ return result, nil
+ },
+ }
+}
+
+func builtinBoolCompare() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt, ast.TypeBool, ast.TypeBool},
+ Variadic: false,
+ ReturnType: ast.TypeBool,
+ Callback: func(args []interface{}) (interface{}, error) {
+ op := args[0].(ast.ArithmeticOp)
+ lhs := args[1].(bool)
+ rhs := args[2].(bool)
+
+ switch op {
+ case ast.ArithmeticOpEqual:
+ return lhs == rhs, nil
+ case ast.ArithmeticOpNotEqual:
+ return lhs != rhs, nil
+ default:
+ return nil, errors.New("invalid comparison operation")
+ }
+ },
+ }
+}
+
+func builtinFloatCompare() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt, ast.TypeFloat, ast.TypeFloat},
+ Variadic: false,
+ ReturnType: ast.TypeBool,
+ Callback: func(args []interface{}) (interface{}, error) {
+ op := args[0].(ast.ArithmeticOp)
+ lhs := args[1].(float64)
+ rhs := args[2].(float64)
+
+ switch op {
+ case ast.ArithmeticOpEqual:
+ return lhs == rhs, nil
+ case ast.ArithmeticOpNotEqual:
+ return lhs != rhs, nil
+ case ast.ArithmeticOpLessThan:
+ return lhs < rhs, nil
+ case ast.ArithmeticOpLessThanOrEqual:
+ return lhs <= rhs, nil
+ case ast.ArithmeticOpGreaterThan:
+ return lhs > rhs, nil
+ case ast.ArithmeticOpGreaterThanOrEqual:
+ return lhs >= rhs, nil
+ default:
+ return nil, errors.New("invalid comparison operation")
+ }
+ },
+ }
+}
+
+func builtinIntCompare() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt, ast.TypeInt, ast.TypeInt},
+ Variadic: false,
+ ReturnType: ast.TypeBool,
+ Callback: func(args []interface{}) (interface{}, error) {
+ op := args[0].(ast.ArithmeticOp)
+ lhs := args[1].(int)
+ rhs := args[2].(int)
+
+ switch op {
+ case ast.ArithmeticOpEqual:
+ return lhs == rhs, nil
+ case ast.ArithmeticOpNotEqual:
+ return lhs != rhs, nil
+ case ast.ArithmeticOpLessThan:
+ return lhs < rhs, nil
+ case ast.ArithmeticOpLessThanOrEqual:
+ return lhs <= rhs, nil
+ case ast.ArithmeticOpGreaterThan:
+ return lhs > rhs, nil
+ case ast.ArithmeticOpGreaterThanOrEqual:
+ return lhs >= rhs, nil
+ default:
+ return nil, errors.New("invalid comparison operation")
+ }
+ },
+ }
+}
+
+func builtinStringCompare() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt, ast.TypeString, ast.TypeString},
+ Variadic: false,
+ ReturnType: ast.TypeBool,
+ Callback: func(args []interface{}) (interface{}, error) {
+ op := args[0].(ast.ArithmeticOp)
+ lhs := args[1].(string)
+ rhs := args[2].(string)
+
+ switch op {
+ case ast.ArithmeticOpEqual:
+ return lhs == rhs, nil
+ case ast.ArithmeticOpNotEqual:
+ return lhs != rhs, nil
+ default:
+ return nil, errors.New("invalid comparison operation")
+ }
+ },
+ }
+}
+
+func builtinLogical() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt},
+ Variadic: true,
+ VariadicType: ast.TypeBool,
+ ReturnType: ast.TypeBool,
+ Callback: func(args []interface{}) (interface{}, error) {
+ op := args[0].(ast.ArithmeticOp)
+ result := args[1].(bool)
+ for _, raw := range args[2:] {
+ arg := raw.(bool)
+ switch op {
+ case ast.ArithmeticOpLogicalOr:
+ result = result || arg
+ case ast.ArithmeticOpLogicalAnd:
+ result = result && arg
+ default:
+ return nil, errors.New("invalid logical operator")
+ }
+ }
+
+ return result, nil
+ },
+ }
+}
+
+func builtinFloatToInt() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeFloat},
+ ReturnType: ast.TypeInt,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return int(args[0].(float64)), nil
+ },
+ }
+}
+
+func builtinFloatToString() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeFloat},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return strconv.FormatFloat(
+ args[0].(float64), 'g', -1, 64), nil
+ },
+ }
+}
+
+func builtinIntToFloat() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt},
+ ReturnType: ast.TypeFloat,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return float64(args[0].(int)), nil
+ },
+ }
+}
+
+func builtinIntToString() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return strconv.FormatInt(int64(args[0].(int)), 10), nil
+ },
+ }
+}
+
+func builtinStringToInt() ast.Function {
+ return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeInt,
+ Callback: func(args []interface{}) (interface{}, error) {
+ v, err := strconv.ParseInt(args[0].(string), 0, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ return int(v), nil
+ },
+ }
+}
+
+func builtinStringToFloat() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeFloat,
+ Callback: func(args []interface{}) (interface{}, error) {
+ v, err := strconv.ParseFloat(args[0].(string), 64)
+ if err != nil {
+ return nil, err
+ }
+
+ return v, nil
+ },
+ }
+}
+
+func builtinBoolToString() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeBool},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return strconv.FormatBool(args[0].(bool)), nil
+ },
+ }
+}
+
+func builtinStringToBool() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeBool,
+ Callback: func(args []interface{}) (interface{}, error) {
+ v, err := strconv.ParseBool(args[0].(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return v, nil
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/hil/check_identifier.go b/vendor/github.com/hashicorp/hil/check_identifier.go
new file mode 100644
index 00000000..474f5058
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/check_identifier.go
@@ -0,0 +1,88 @@
+package hil
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+// IdentifierCheck is a SemanticCheck that checks that all identifiers
+// resolve properly and that the right number of arguments are passed
+// to functions.
+type IdentifierCheck struct {
+ Scope ast.Scope
+
+ err error
+ lock sync.Mutex
+}
+
+func (c *IdentifierCheck) Visit(root ast.Node) error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ defer c.reset()
+ root.Accept(c.visit)
+ return c.err
+}
+
+func (c *IdentifierCheck) visit(raw ast.Node) ast.Node {
+ if c.err != nil {
+ return raw
+ }
+
+ switch n := raw.(type) {
+ case *ast.Call:
+ c.visitCall(n)
+ case *ast.VariableAccess:
+ c.visitVariableAccess(n)
+ case *ast.Output:
+ // Ignore
+ case *ast.LiteralNode:
+ // Ignore
+ default:
+ // Ignore
+ }
+
+ // We never do replacement with this visitor
+ return raw
+}
+
+func (c *IdentifierCheck) visitCall(n *ast.Call) {
+ // Look up the function in the map
+ function, ok := c.Scope.LookupFunc(n.Func)
+ if !ok {
+ c.createErr(n, fmt.Sprintf("unknown function called: %s", n.Func))
+ return
+ }
+
+ // Break up the args into what is variadic and what is required
+ args := n.Args
+ if function.Variadic && len(args) > len(function.ArgTypes) {
+ args = n.Args[:len(function.ArgTypes)]
+ }
+
+ // Verify the number of arguments
+ if len(args) != len(function.ArgTypes) {
+ c.createErr(n, fmt.Sprintf(
+ "%s: expected %d arguments, got %d",
+ n.Func, len(function.ArgTypes), len(n.Args)))
+ return
+ }
+}
+
+func (c *IdentifierCheck) visitVariableAccess(n *ast.VariableAccess) {
+ // Look up the variable in the map
+ if _, ok := c.Scope.LookupVar(n.Name); !ok {
+ c.createErr(n, fmt.Sprintf(
+ "unknown variable accessed: %s", n.Name))
+ return
+ }
+}
+
+func (c *IdentifierCheck) createErr(n ast.Node, str string) {
+ c.err = fmt.Errorf("%s: %s", n.Pos(), str)
+}
+
+func (c *IdentifierCheck) reset() {
+ c.err = nil
+}
diff --git a/vendor/github.com/hashicorp/hil/check_types.go b/vendor/github.com/hashicorp/hil/check_types.go
new file mode 100644
index 00000000..7a191e87
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/check_types.go
@@ -0,0 +1,662 @@
+package hil
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+// TypeCheck implements ast.Visitor for type checking an AST tree.
+// It requires some configuration to look up the type of nodes.
+//
+// It also, optionally, will avoid type errors by inserting implicit
+// type conversions for specific types, as specified by the Implicit
+// field. Note that this is kind of organizationally weird to put into
+// this structure but we'd rather do that than duplicate the type checking
+// logic multiple times.
+type TypeCheck struct {
+ Scope ast.Scope
+
+ // Implicit is a map of implicit type conversions that we can do,
+ // and that shouldn't error. The key of the first map is the from type,
+ // the key of the second map is the to type, and the final string
+ // value is the function to call (which must be registered in the Scope).
+ Implicit map[ast.Type]map[ast.Type]string
+
+ // Stack of types. This shouldn't be used directly except by implementations
+ // of TypeCheckNode.
+ Stack []ast.Type
+
+ err error
+ lock sync.Mutex
+}
+
+// TypeCheckNode is the interface that must be implemented by any
+// ast.Node that wants to support type-checking. If the type checker
+// encounters a node that doesn't implement this, it will error.
+type TypeCheckNode interface {
+ TypeCheck(*TypeCheck) (ast.Node, error)
+}
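+
+// An editorial sketch (not part of the upstream file): an Implicit map
+// that lets ints be silently converted to strings would look like
+//
+//	Implicit: map[ast.Type]map[ast.Type]string{
+//		ast.TypeInt: {ast.TypeString: "__builtin_IntToString"},
+//	}
+//
+// where "__builtin_IntToString" is one of the conversion functions
+// registered in builtins.go via registerBuiltins.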
+
+func (v *TypeCheck) Visit(root ast.Node) error {
+ v.lock.Lock()
+ defer v.lock.Unlock()
+ defer v.reset()
+ root.Accept(v.visit)
+
+ // If the resulting type is unknown, then just let the whole thing go.
+ if v.err == errExitUnknown {
+ v.err = nil
+ }
+
+ return v.err
+}
+
+func (v *TypeCheck) visit(raw ast.Node) ast.Node {
+ if v.err != nil {
+ return raw
+ }
+
+ var result ast.Node
+ var err error
+ switch n := raw.(type) {
+ case *ast.Arithmetic:
+ tc := &typeCheckArithmetic{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.Call:
+ tc := &typeCheckCall{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.Conditional:
+ tc := &typeCheckConditional{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.Index:
+ tc := &typeCheckIndex{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.Output:
+ tc := &typeCheckOutput{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.LiteralNode:
+ tc := &typeCheckLiteral{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.VariableAccess:
+ tc := &typeCheckVariableAccess{n}
+ result, err = tc.TypeCheck(v)
+ default:
+ tc, ok := raw.(TypeCheckNode)
+ if !ok {
+ err = fmt.Errorf("unknown node for type check: %#v", raw)
+ break
+ }
+
+ result, err = tc.TypeCheck(v)
+ }
+
+ if err != nil {
+ pos := raw.Pos()
+ v.err = fmt.Errorf("At column %d, line %d: %s",
+ pos.Column, pos.Line, err)
+ }
+
+ return result
+}
+
+type typeCheckArithmetic struct {
+ n *ast.Arithmetic
+}
+
+func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) {
+ // The arguments are on the stack in reverse order, so pop them off.
+ exprs := make([]ast.Type, len(tc.n.Exprs))
+	for i := range tc.n.Exprs {
+ exprs[len(tc.n.Exprs)-1-i] = v.StackPop()
+ }
+
+ // If any operand is unknown then our result is automatically unknown
+ for _, ty := range exprs {
+ if ty == ast.TypeUnknown {
+ v.StackPush(ast.TypeUnknown)
+ return tc.n, nil
+ }
+ }
+
+ switch tc.n.Op {
+ case ast.ArithmeticOpLogicalAnd, ast.ArithmeticOpLogicalOr:
+ return tc.checkLogical(v, exprs)
+ case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual,
+ ast.ArithmeticOpLessThan, ast.ArithmeticOpGreaterThan,
+ ast.ArithmeticOpGreaterThanOrEqual, ast.ArithmeticOpLessThanOrEqual:
+ return tc.checkComparison(v, exprs)
+ default:
+ return tc.checkNumeric(v, exprs)
+ }
+}
+
+func (tc *typeCheckArithmetic) checkNumeric(v *TypeCheck, exprs []ast.Type) (ast.Node, error) {
+ // Determine the resulting type we want. We do this by going over
+ // every expression until we find one with a type we recognize.
+	// We do this because the first expr might be a string ("var.foo")
+	// and we need to know what type to implicitly convert it to.
+ mathFunc := "__builtin_IntMath"
+ mathType := ast.TypeInt
+ for _, v := range exprs {
+ // We assume int math but if we find ANY float, the entire
+ // expression turns into floating point math.
+ if v == ast.TypeFloat {
+ mathFunc = "__builtin_FloatMath"
+ mathType = v
+ break
+ }
+ }
+
+ // Verify the args
+ for i, arg := range exprs {
+ if arg != mathType {
+ cn := v.ImplicitConversion(exprs[i], mathType, tc.n.Exprs[i])
+ if cn != nil {
+ tc.n.Exprs[i] = cn
+ continue
+ }
+
+ return nil, fmt.Errorf(
+ "operand %d should be %s, got %s",
+ i+1, mathType, arg)
+ }
+ }
+
+ // Modulo doesn't work for floats
+ if mathType == ast.TypeFloat && tc.n.Op == ast.ArithmeticOpMod {
+ return nil, fmt.Errorf("modulo cannot be used with floats")
+ }
+
+ // Return type
+ v.StackPush(mathType)
+
+ // Replace our node with a call to the proper function. This isn't
+ // type checked but we already verified types.
+ args := make([]ast.Node, len(tc.n.Exprs)+1)
+ args[0] = &ast.LiteralNode{
+ Value: tc.n.Op,
+ Typex: ast.TypeInt,
+ Posx: tc.n.Pos(),
+ }
+ copy(args[1:], tc.n.Exprs)
+ return &ast.Call{
+ Func: mathFunc,
+ Args: args,
+ Posx: tc.n.Pos(),
+ }, nil
+}
+
+func (tc *typeCheckArithmetic) checkComparison(v *TypeCheck, exprs []ast.Type) (ast.Node, error) {
+ if len(exprs) != 2 {
+ // This should never happen, because the parser never produces
+ // nodes that violate this.
+ return nil, fmt.Errorf(
+ "comparison operators must have exactly two operands",
+ )
+ }
+
+ // The first operand always dictates the type for a comparison.
+ compareFunc := ""
+ compareType := exprs[0]
+ switch compareType {
+ case ast.TypeBool:
+ compareFunc = "__builtin_BoolCompare"
+ case ast.TypeFloat:
+ compareFunc = "__builtin_FloatCompare"
+ case ast.TypeInt:
+ compareFunc = "__builtin_IntCompare"
+ case ast.TypeString:
+ compareFunc = "__builtin_StringCompare"
+ default:
+ return nil, fmt.Errorf(
+ "comparison operators apply only to bool, float, int, and string",
+ )
+ }
+
+ // For non-equality comparisons, we will do implicit conversions to
+ // numeric types if possible: the comparison defaults to int, but if
+ // any operand is a float the whole comparison is done on floats.
+ if tc.n.Op != ast.ArithmeticOpEqual && tc.n.Op != ast.ArithmeticOpNotEqual {
+ compareFunc = "__builtin_IntCompare"
+ compareType = ast.TypeInt
+ for _, expr := range exprs {
+ if expr == ast.TypeFloat {
+ compareFunc = "__builtin_FloatCompare"
+ compareType = ast.TypeFloat
+ break
+ }
+ }
+ }
+
+ // Verify (and possibly, convert) the args
+ for i, arg := range exprs {
+ if arg != compareType {
+ cn := v.ImplicitConversion(exprs[i], compareType, tc.n.Exprs[i])
+ if cn != nil {
+ tc.n.Exprs[i] = cn
+ continue
+ }
+
+ return nil, fmt.Errorf(
+ "operand %d should be %s, got %s",
+ i+1, compareType, arg,
+ )
+ }
+ }
+
+ // Only ints and floats can have the <, >, <= and >= operators applied
+ switch tc.n.Op {
+ case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual:
+ // anything goes
+ default:
+ switch compareType {
+ case ast.TypeFloat, ast.TypeInt:
+ // fine
+ default:
+ return nil, fmt.Errorf(
+ "<, >, <= and >= may apply only to int and float values",
+ )
+ }
+ }
+
+ // Comparison operators always return bool
+ v.StackPush(ast.TypeBool)
+
+ // Replace our node with a call to the proper function. This isn't
+ // type checked but we already verified types.
+ args := make([]ast.Node, len(tc.n.Exprs)+1)
+ args[0] = &ast.LiteralNode{
+ Value: tc.n.Op,
+ Typex: ast.TypeInt,
+ Posx: tc.n.Pos(),
+ }
+ copy(args[1:], tc.n.Exprs)
+ return &ast.Call{
+ Func: compareFunc,
+ Args: args,
+ Posx: tc.n.Pos(),
+ }, nil
+}
+
+func (tc *typeCheckArithmetic) checkLogical(v *TypeCheck, exprs []ast.Type) (ast.Node, error) {
+ for i, t := range exprs {
+ if t != ast.TypeBool {
+ cn := v.ImplicitConversion(t, ast.TypeBool, tc.n.Exprs[i])
+ if cn == nil {
+ return nil, fmt.Errorf(
+ "logical operators require boolean operands, not %s",
+ t,
+ )
+ }
+ tc.n.Exprs[i] = cn
+ }
+ }
+
+ // Return type is always boolean
+ v.StackPush(ast.TypeBool)
+
+ // Arithmetic nodes are replaced with a call to a built-in function
+ args := make([]ast.Node, len(tc.n.Exprs)+1)
+ args[0] = &ast.LiteralNode{
+ Value: tc.n.Op,
+ Typex: ast.TypeInt,
+ Posx: tc.n.Pos(),
+ }
+ copy(args[1:], tc.n.Exprs)
+ return &ast.Call{
+ Func: "__builtin_Logical",
+ Args: args,
+ Posx: tc.n.Pos(),
+ }, nil
+}
+
+type typeCheckCall struct {
+ n *ast.Call
+}
+
+func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) {
+ // Look up the function in the map
+ function, ok := v.Scope.LookupFunc(tc.n.Func)
+ if !ok {
+ return nil, fmt.Errorf("unknown function called: %s", tc.n.Func)
+ }
+
+ // The arguments are on the stack in reverse order, so pop them off.
+ args := make([]ast.Type, len(tc.n.Args))
+ for i := range tc.n.Args {
+ args[len(tc.n.Args)-1-i] = v.StackPop()
+ }
+
+ // Verify the args
+ for i, expected := range function.ArgTypes {
+ if expected == ast.TypeAny {
+ continue
+ }
+
+ if args[i] == ast.TypeUnknown {
+ v.StackPush(ast.TypeUnknown)
+ return tc.n, nil
+ }
+
+ if args[i] != expected {
+ cn := v.ImplicitConversion(args[i], expected, tc.n.Args[i])
+ if cn != nil {
+ tc.n.Args[i] = cn
+ continue
+ }
+
+ return nil, fmt.Errorf(
+ "%s: argument %d should be %s, got %s",
+ tc.n.Func, i+1, expected.Printable(), args[i].Printable())
+ }
+ }
+
+ // If we're variadic, then verify the types there
+ if function.Variadic && function.VariadicType != ast.TypeAny {
+ args = args[len(function.ArgTypes):]
+ for i, t := range args {
+ if t == ast.TypeUnknown {
+ v.StackPush(ast.TypeUnknown)
+ return tc.n, nil
+ }
+
+ if t != function.VariadicType {
+ realI := i + len(function.ArgTypes)
+ cn := v.ImplicitConversion(
+ t, function.VariadicType, tc.n.Args[realI])
+ if cn != nil {
+ tc.n.Args[realI] = cn
+ continue
+ }
+
+ return nil, fmt.Errorf(
+ "%s: argument %d should be %s, got %s",
+ tc.n.Func, realI+1,
+ function.VariadicType.Printable(), t.Printable())
+ }
+ }
+ }
+
+ // Return type
+ v.StackPush(function.ReturnType)
+
+ return tc.n, nil
+}
+
+type typeCheckConditional struct {
+ n *ast.Conditional
+}
+
+func (tc *typeCheckConditional) TypeCheck(v *TypeCheck) (ast.Node, error) {
+ // On the stack we have the types of the condition, true and false
+ // expressions, but they are in reverse order.
+ falseType := v.StackPop()
+ trueType := v.StackPop()
+ condType := v.StackPop()
+
+ if condType == ast.TypeUnknown {
+ v.StackPush(ast.TypeUnknown)
+ return tc.n, nil
+ }
+
+ if condType != ast.TypeBool {
+ cn := v.ImplicitConversion(condType, ast.TypeBool, tc.n.CondExpr)
+ if cn == nil {
+ return nil, fmt.Errorf(
+ "condition must be type bool, not %s", condType.Printable(),
+ )
+ }
+ tc.n.CondExpr = cn
+ }
+
+ // The types of the true and false expression must match
+ if trueType != falseType {
+
+ // Since passing around stringified versions of other types is
+ // common, we pragmatically allow the false expression to dictate
+ // the result type when the true expression is a string.
+ if trueType == ast.TypeString {
+ cn := v.ImplicitConversion(trueType, falseType, tc.n.TrueExpr)
+ if cn == nil {
+ return nil, fmt.Errorf(
+ "true and false expression types must match; have %s and %s",
+ trueType.Printable(), falseType.Printable(),
+ )
+ }
+ tc.n.TrueExpr = cn
+ trueType = falseType
+ } else {
+ cn := v.ImplicitConversion(falseType, trueType, tc.n.FalseExpr)
+ if cn == nil {
+ return nil, fmt.Errorf(
+ "true and false expression types must match; have %s and %s",
+ trueType.Printable(), falseType.Printable(),
+ )
+ }
+ tc.n.FalseExpr = cn
+ falseType = trueType
+ }
+ }
+
+ // Currently list and map types cannot be used, because we cannot
+ // generally assert that their element types are consistent.
+ // Such support might be added later, either by improving the type
+ // system or restricting usage to only variable and literal expressions,
+ // but for now this is simply prohibited because it doesn't seem to
+ // be a common enough case to be worth the complexity.
+ switch trueType {
+ case ast.TypeList:
+ return nil, fmt.Errorf(
+ "conditional operator cannot be used with list values",
+ )
+ case ast.TypeMap:
+ return nil, fmt.Errorf(
+ "conditional operator cannot be used with map values",
+ )
+ }
+
+ // Result type (guaranteed to also match falseType due to the above)
+ v.StackPush(trueType)
+
+ return tc.n, nil
+}
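+
+// Hedged illustrations of the conversion rules above:
+//
+//	${var.ok ? "1" : 2}   the string true-branch is converted toward the
+//	                      int false-branch; the conditional is TypeInt
+//	${var.ok ? 1.5 : 2}   the int false-branch is converted toward the
+//	                      float true-branch; the conditional is TypeFloat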
+
+type typeCheckOutput struct {
+ n *ast.Output
+}
+
+func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) {
+ n := tc.n
+ types := make([]ast.Type, len(n.Exprs))
+ for i := range n.Exprs {
+ types[len(n.Exprs)-1-i] = v.StackPop()
+ }
+
+ for _, ty := range types {
+ if ty == ast.TypeUnknown {
+ v.StackPush(ast.TypeUnknown)
+ return tc.n, nil
+ }
+ }
+
+ // If there is only one argument and it is a list or a map, the
+ // result takes on that type directly
+ if len(types) == 1 {
+ switch t := types[0]; t {
+ case ast.TypeList, ast.TypeMap:
+ v.StackPush(t)
+ return n, nil
+ }
+ }
+
+ // Otherwise, all concat args must be strings, so validate that
+ resultType := ast.TypeString
+ for i, t := range types {
+
+ if t == ast.TypeUnknown {
+ resultType = ast.TypeUnknown
+ continue
+ }
+
+ if t != ast.TypeString {
+ cn := v.ImplicitConversion(t, ast.TypeString, n.Exprs[i])
+ if cn != nil {
+ n.Exprs[i] = cn
+ continue
+ }
+
+ return nil, fmt.Errorf(
+ "output of an HIL expression must be a string, or a single list (argument %d is %s)", i+1, t)
+ }
+ }
+
+ // This always results in type string, unless there are unknowns
+ v.StackPush(resultType)
+
+ return n, nil
+}
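+
+// Hedged illustrations of the two result shapes above:
+//
+//	${var.names}     a single list part; the result is ast.TypeList
+//	hi ${var.who}!   mixed parts; every part must be (or implicitly
+//	                 convert to) a string, and the result is ast.TypeString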
+
+type typeCheckLiteral struct {
+ n *ast.LiteralNode
+}
+
+func (tc *typeCheckLiteral) TypeCheck(v *TypeCheck) (ast.Node, error) {
+ v.StackPush(tc.n.Typex)
+ return tc.n, nil
+}
+
+type typeCheckVariableAccess struct {
+ n *ast.VariableAccess
+}
+
+func (tc *typeCheckVariableAccess) TypeCheck(v *TypeCheck) (ast.Node, error) {
+ // Look up the variable in the map
+ variable, ok := v.Scope.LookupVar(tc.n.Name)
+ if !ok {
+ return nil, fmt.Errorf(
+ "unknown variable accessed: %s", tc.n.Name)
+ }
+
+ // Add the type to the stack
+ v.StackPush(variable.Type)
+
+ return tc.n, nil
+}
+
+type typeCheckIndex struct {
+ n *ast.Index
+}
+
+func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) {
+ keyType := v.StackPop()
+ targetType := v.StackPop()
+
+ if keyType == ast.TypeUnknown || targetType == ast.TypeUnknown {
+ v.StackPush(ast.TypeUnknown)
+ return tc.n, nil
+ }
+
+ // Ensure we have a VariableAccess as the target
+ varAccessNode, ok := tc.n.Target.(*ast.VariableAccess)
+ if !ok {
+ return nil, fmt.Errorf(
+ "target of an index must be a VariableAccess node, was %T", tc.n.Target)
+ }
+
+ // Get the variable
+ variable, ok := v.Scope.LookupVar(varAccessNode.Name)
+ if !ok {
+ return nil, fmt.Errorf(
+ "unknown variable accessed: %s", varAccessNode.Name)
+ }
+
+ switch targetType {
+ case ast.TypeList:
+ if keyType != ast.TypeInt {
+ tc.n.Key = v.ImplicitConversion(keyType, ast.TypeInt, tc.n.Key)
+ if tc.n.Key == nil {
+ return nil, fmt.Errorf(
+ "key of an index must be an int, was %s", keyType)
+ }
+ }
+
+ valType, err := ast.VariableListElementTypesAreHomogenous(
+ varAccessNode.Name, variable.Value.([]ast.Variable))
+ if err != nil {
+ return tc.n, err
+ }
+
+ v.StackPush(valType)
+ return tc.n, nil
+ case ast.TypeMap:
+ if keyType != ast.TypeString {
+ tc.n.Key = v.ImplicitConversion(keyType, ast.TypeString, tc.n.Key)
+ if tc.n.Key == nil {
+ return nil, fmt.Errorf(
+ "key of an index must be a string, was %s", keyType)
+ }
+ }
+
+ valType, err := ast.VariableMapValueTypesAreHomogenous(
+ varAccessNode.Name, variable.Value.(map[string]ast.Variable))
+ if err != nil {
+ return tc.n, err
+ }
+
+ v.StackPush(valType)
+ return tc.n, nil
+ default:
+ return nil, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type)
+ }
+}
+
+func (v *TypeCheck) ImplicitConversion(
+ actual ast.Type, expected ast.Type, n ast.Node) ast.Node {
+ if v.Implicit == nil {
+ return nil
+ }
+
+ fromMap, ok := v.Implicit[actual]
+ if !ok {
+ return nil
+ }
+
+ toFunc, ok := fromMap[expected]
+ if !ok {
+ return nil
+ }
+
+ return &ast.Call{
+ Func: toFunc,
+ Args: []ast.Node{n},
+ Posx: n.Pos(),
+ }
+}
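+
+// A minimal in-package sketch of the double lookup above; the conversion
+// name mirrors the implicit map that internalEval (eval.go) builds:
+//
+//	n := &ast.LiteralNode{Value: 42, Typex: ast.TypeInt}
+//	v := &TypeCheck{Implicit: map[ast.Type]map[ast.Type]string{
+//		ast.TypeInt: {ast.TypeString: "__builtin_IntToString"},
+//	}}
+//	cn := v.ImplicitConversion(ast.TypeInt, ast.TypeString, n)
+//	// cn is &ast.Call{Func: "__builtin_IntToString", Args: []ast.Node{n}};
+//	// a nil result means no conversion is registered.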
+
+func (v *TypeCheck) reset() {
+ v.Stack = nil
+ v.err = nil
+}
+
+func (v *TypeCheck) StackPush(t ast.Type) {
+ v.Stack = append(v.Stack, t)
+}
+
+func (v *TypeCheck) StackPop() ast.Type {
+ var x ast.Type
+ x, v.Stack = v.Stack[len(v.Stack)-1], v.Stack[:len(v.Stack)-1]
+ return x
+}
+
+func (v *TypeCheck) StackPeek() ast.Type {
+ if len(v.Stack) == 0 {
+ return ast.TypeInvalid
+ }
+
+ return v.Stack[len(v.Stack)-1]
+}
diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go
new file mode 100644
index 00000000..f2024d01
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/convert.go
@@ -0,0 +1,159 @@
+package hil
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/hashicorp/hil/ast"
+ "github.com/mitchellh/mapstructure"
+)
+
+// UnknownValue is a sentinel value that can be used to denote
+// that the value of a variable (or map element, list element, etc.)
+// is unknown. This will always have the type ast.TypeUnknown.
+const UnknownValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
+
+var hilMapstructureDecodeHookSlice []interface{}
+var hilMapstructureDecodeHookStringSlice []string
+var hilMapstructureDecodeHookMap map[string]interface{}
+
+// hilMapstructureWeakDecode behaves in the same way as mapstructure.WeakDecode
+// but has a DecodeHook which defeats the backward-compatibility mode of
+// mapstructure that WeakDecodes []interface{}{} into an empty
+// map[string]interface{}. This lets us keep WeakDecode (desirable) without
+// empty lists being silently decoded as empty maps.
+func hilMapstructureWeakDecode(m interface{}, rawVal interface{}) error {
+ config := &mapstructure.DecoderConfig{
+ DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) {
+ sliceType := reflect.TypeOf(hilMapstructureDecodeHookSlice)
+ stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice)
+ mapType := reflect.TypeOf(hilMapstructureDecodeHookMap)
+
+ if (source == sliceType || source == stringSliceType) && target == mapType {
+ return nil, fmt.Errorf("Cannot convert %s into a %s", source, target)
+ }
+
+ return val, nil
+ },
+ WeaklyTypedInput: true,
+ Result: rawVal,
+ }
+
+ decoder, err := mapstructure.NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(m)
+}
+
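+// InterfaceToVariable converts a native Go value (or anything weakly
+// convertible to a string, map, or slice) into an ast.Variable.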
+func InterfaceToVariable(input interface{}) (ast.Variable, error) {
+ if inputVariable, ok := input.(ast.Variable); ok {
+ return inputVariable, nil
+ }
+
+ var stringVal string
+ if err := hilMapstructureWeakDecode(input, &stringVal); err == nil {
+ // Special case the unknown value to turn into "unknown"
+ if stringVal == UnknownValue {
+ return ast.Variable{Value: UnknownValue, Type: ast.TypeUnknown}, nil
+ }
+
+ // Otherwise return the string value
+ return ast.Variable{
+ Type: ast.TypeString,
+ Value: stringVal,
+ }, nil
+ }
+
+ var mapVal map[string]interface{}
+ if err := hilMapstructureWeakDecode(input, &mapVal); err == nil {
+ elements := make(map[string]ast.Variable)
+ for key, element := range mapVal {
+ varElement, err := InterfaceToVariable(element)
+ if err != nil {
+ return ast.Variable{}, err
+ }
+ elements[key] = varElement
+ }
+
+ return ast.Variable{
+ Type: ast.TypeMap,
+ Value: elements,
+ }, nil
+ }
+
+ var sliceVal []interface{}
+ if err := hilMapstructureWeakDecode(input, &sliceVal); err == nil {
+ elements := make([]ast.Variable, len(sliceVal))
+ for i, element := range sliceVal {
+ varElement, err := InterfaceToVariable(element)
+ if err != nil {
+ return ast.Variable{}, err
+ }
+ elements[i] = varElement
+ }
+
+ return ast.Variable{
+ Type: ast.TypeList,
+ Value: elements,
+ }, nil
+ }
+
+ return ast.Variable{}, fmt.Errorf("value for conversion must be a string, interface{} or map[string]interface: got %T", input)
+}
+
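+// VariableToInterface is the inverse of InterfaceToVariable, converting an
+// ast.Variable back into its natural Go representation.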
+func VariableToInterface(input ast.Variable) (interface{}, error) {
+ if input.Type == ast.TypeString {
+ if inputStr, ok := input.Value.(string); ok {
+ return inputStr, nil
+ } else {
+ return nil, fmt.Errorf("ast.Variable with type string has value which is not a string")
+ }
+ }
+
+ if input.Type == ast.TypeList {
+ inputList, ok := input.Value.([]ast.Variable)
+ if !ok {
+ return nil, fmt.Errorf("ast.Variable with type list has value which is not a []ast.Variable")
+ }
+
+ result := make([]interface{}, 0)
+ if len(inputList) == 0 {
+ return result, nil
+ }
+
+ for _, element := range inputList {
+ if convertedElement, err := VariableToInterface(element); err == nil {
+ result = append(result, convertedElement)
+ } else {
+ return nil, err
+ }
+ }
+
+ return result, nil
+ }
+
+ if input.Type == ast.TypeMap {
+ inputMap, ok := input.Value.(map[string]ast.Variable)
+ if !ok {
+ return nil, fmt.Errorf("ast.Variable with type map has value which is not a map[string]ast.Variable")
+ }
+
+ result := make(map[string]interface{})
+ if len(inputMap) == 0 {
+ return result, nil
+ }
+
+ for key, value := range inputMap {
+ if convertedValue, err := VariableToInterface(value); err == nil {
+ result[key] = convertedValue
+ } else {
+ return nil, err
+ }
+ }
+
+ return result, nil
+ }
+
+ return nil, fmt.Errorf("unknown input type: %s", input.Type)
+}
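+
+// A hedged round-trip sketch (in-package; the input map is made up):
+//
+//	v, err := InterfaceToVariable(map[string]interface{}{"name": "web"})
+//	// err == nil; v.Type == ast.TypeMap and v.Value is a
+//	// map[string]ast.Variable with one ast.TypeString element
+//	raw, err := VariableToInterface(v)
+//	// err == nil; raw is map[string]interface{}{"name": "web"} again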
diff --git a/vendor/github.com/hashicorp/hil/eval.go b/vendor/github.com/hashicorp/hil/eval.go
new file mode 100644
index 00000000..27820769
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/eval.go
@@ -0,0 +1,472 @@
+package hil
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+// EvalConfig is the configuration for evaluating.
+type EvalConfig struct {
+ // GlobalScope is the global scope of execution for evaluation.
+ GlobalScope *ast.BasicScope
+
+ // SemanticChecks is a list of additional semantic checks that will be run
+ // on the tree prior to evaluating it. The type checker, identifier checker,
+ // etc. will be run before these automatically.
+ SemanticChecks []SemanticChecker
+}
+
+// SemanticChecker is the type that must be implemented to do a
+// semantic check on an AST tree. This will be called with the root node.
+type SemanticChecker func(ast.Node) error
+
+// EvaluationResult is a struct returned from the hil.Eval function,
+// representing the result of an interpolation. Results are returned in their
+// "natural" Go structure rather than in terms of the HIL AST. For the types
+// currently implemented, this means that the Value field can be interpreted as
+// the following Go types:
+// TypeInvalid: undefined
+// TypeString: string
+// TypeList: []interface{}
+// TypeMap: map[string]interface{}
+// TypeBool: bool
+// TypeUnknown: UnknownValue (the sentinel defined in convert.go)
+type EvaluationResult struct {
+ Type EvalType
+ Value interface{}
+}
+
+// InvalidResult is a structure representing the result of a HIL interpolation
+// which has invalid syntax, missing variables, or some other type of error.
+// The error is described out of band in the accompanying error return value.
+var InvalidResult = EvaluationResult{Type: TypeInvalid, Value: nil}
+
+// errExitUnknown is an internal error that when returned means the result
+// is an unknown value. We use this for early exit.
+var errExitUnknown = errors.New("unknown value")
+
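+// Eval type-checks and evaluates the AST tree returned from Parse, using
+// the given configuration, and returns the result as an EvaluationResult.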
+func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) {
+ output, outputType, err := internalEval(root, config)
+ if err != nil {
+ return InvalidResult, err
+ }
+
+ // If the result contains any nested unknowns then the result as a whole
+ // is unknown, so that callers only have to deal with "entirely known"
+ // or "entirely unknown" as outcomes.
+ if ast.IsUnknown(ast.Variable{Type: outputType, Value: output}) {
+ outputType = ast.TypeUnknown
+ output = UnknownValue
+ }
+
+ switch outputType {
+ case ast.TypeList:
+ val, err := VariableToInterface(ast.Variable{
+ Type: ast.TypeList,
+ Value: output,
+ })
+ return EvaluationResult{
+ Type: TypeList,
+ Value: val,
+ }, err
+ case ast.TypeMap:
+ val, err := VariableToInterface(ast.Variable{
+ Type: ast.TypeMap,
+ Value: output,
+ })
+ return EvaluationResult{
+ Type: TypeMap,
+ Value: val,
+ }, err
+ case ast.TypeString:
+ return EvaluationResult{
+ Type: TypeString,
+ Value: output,
+ }, nil
+ case ast.TypeBool:
+ return EvaluationResult{
+ Type: TypeBool,
+ Value: output,
+ }, nil
+ case ast.TypeUnknown:
+ return EvaluationResult{
+ Type: TypeUnknown,
+ Value: UnknownValue,
+ }, nil
+ default:
+ return InvalidResult, fmt.Errorf("unknown type %s as interpolation output", outputType)
+ }
+}
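+
+// A hedged end-to-end sketch (ast.BasicScope and its VarMap field are
+// defined in the hil/ast package, outside this diff):
+//
+//	tree, err := hil.Parse("hello ${var.who}, 1 + 2 is ${1 + 2}")
+//	if err != nil {
+//		panic(err)
+//	}
+//	result, err := hil.Eval(tree, &hil.EvalConfig{
+//		GlobalScope: &ast.BasicScope{
+//			VarMap: map[string]ast.Variable{
+//				"var.who": {Type: ast.TypeString, Value: "world"},
+//			},
+//		},
+//	})
+//	if err != nil {
+//		panic(err)
+//	}
+//	// result.Type == TypeString and result.Value is
+//	// "hello world, 1 + 2 is 3": the int 3 was implicitly
+//	// converted to a string by the type checker.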
+
+// internalEval evaluates the given AST tree and returns its output value,
+// the type of the output, and any error that occurred.
+func internalEval(root ast.Node, config *EvalConfig) (interface{}, ast.Type, error) {
+ // Copy the scope so we can add our builtins
+ if config == nil {
+ config = new(EvalConfig)
+ }
+ scope := registerBuiltins(config.GlobalScope)
+ implicitMap := map[ast.Type]map[ast.Type]string{
+ ast.TypeFloat: {
+ ast.TypeInt: "__builtin_FloatToInt",
+ ast.TypeString: "__builtin_FloatToString",
+ },
+ ast.TypeInt: {
+ ast.TypeFloat: "__builtin_IntToFloat",
+ ast.TypeString: "__builtin_IntToString",
+ },
+ ast.TypeString: {
+ ast.TypeInt: "__builtin_StringToInt",
+ ast.TypeFloat: "__builtin_StringToFloat",
+ ast.TypeBool: "__builtin_StringToBool",
+ },
+ ast.TypeBool: {
+ ast.TypeString: "__builtin_BoolToString",
+ },
+ }
+
+ // Build our own semantic checks that we always run
+ tv := &TypeCheck{Scope: scope, Implicit: implicitMap}
+ ic := &IdentifierCheck{Scope: scope}
+
+ // Build up the semantic checks for execution
+ checks := make(
+ []SemanticChecker,
+ len(config.SemanticChecks),
+ len(config.SemanticChecks)+2)
+ copy(checks, config.SemanticChecks)
+ checks = append(checks, ic.Visit)
+ checks = append(checks, tv.Visit)
+
+ // Run the semantic checks
+ for _, check := range checks {
+ if err := check(root); err != nil {
+ return nil, ast.TypeInvalid, err
+ }
+ }
+
+ // Execute
+ v := &evalVisitor{Scope: scope}
+ return v.Visit(root)
+}
+
+// EvalNode is the interface that must be implemented by any ast.Node
+// to support evaluation. This will be called in visitor pattern order.
+// The result of each call to Eval is automatically pushed onto the
+// stack as a LiteralNode. Pop elements off the stack to get child
+// values.
+type EvalNode interface {
+ Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error)
+}
+
+type evalVisitor struct {
+ Scope ast.Scope
+ Stack ast.Stack
+
+ err error
+ lock sync.Mutex
+}
+
+func (v *evalVisitor) Visit(root ast.Node) (interface{}, ast.Type, error) {
+ // Run the actual visitor pattern
+ root.Accept(v.visit)
+
+ // Get our result and clear out everything else
+ var result *ast.LiteralNode
+ if v.Stack.Len() > 0 {
+ result = v.Stack.Pop().(*ast.LiteralNode)
+ } else {
+ result = new(ast.LiteralNode)
+ }
+ resultErr := v.err
+ if resultErr == errExitUnknown {
+ // This means the return value is unknown and we used the error
+ // as an early exit mechanism. Reset since the value on the stack
+ // should be the unknown value.
+ resultErr = nil
+ }
+
+ // Clear everything else so we aren't just dangling
+ v.Stack.Reset()
+ v.err = nil
+
+ t, err := result.Type(v.Scope)
+ if err != nil {
+ return nil, ast.TypeInvalid, err
+ }
+
+ return result.Value, t, resultErr
+}
+
+func (v *evalVisitor) visit(raw ast.Node) ast.Node {
+ if v.err != nil {
+ return raw
+ }
+
+ en, err := evalNode(raw)
+ if err != nil {
+ v.err = err
+ return raw
+ }
+
+ out, outType, err := en.Eval(v.Scope, &v.Stack)
+ if err != nil {
+ v.err = err
+ return raw
+ }
+
+ v.Stack.Push(&ast.LiteralNode{
+ Value: out,
+ Typex: outType,
+ })
+
+ if outType == ast.TypeUnknown {
+ // Halt immediately
+ v.err = errExitUnknown
+ return raw
+ }
+
+ return raw
+}
+
+// evalNode is a private function that returns an EvalNode for built-in
+// types as well as any other EvalNode implementations.
+func evalNode(raw ast.Node) (EvalNode, error) {
+ switch n := raw.(type) {
+ case *ast.Index:
+ return &evalIndex{n}, nil
+ case *ast.Call:
+ return &evalCall{n}, nil
+ case *ast.Conditional:
+ return &evalConditional{n}, nil
+ case *ast.Output:
+ return &evalOutput{n}, nil
+ case *ast.LiteralNode:
+ return &evalLiteralNode{n}, nil
+ case *ast.VariableAccess:
+ return &evalVariableAccess{n}, nil
+ default:
+ en, ok := n.(EvalNode)
+ if !ok {
+ return nil, fmt.Errorf("node doesn't support evaluation: %#v", raw)
+ }
+
+ return en, nil
+ }
+}
+
+type evalCall struct{ *ast.Call }
+
+func (v *evalCall) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+ // Look up the function in the map
+ function, ok := s.LookupFunc(v.Func)
+ if !ok {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "unknown function called: %s", v.Func)
+ }
+
+ // The arguments are on the stack in reverse order, so pop them off.
+ args := make([]interface{}, len(v.Args))
+ for i := range v.Args {
+ node := stack.Pop().(*ast.LiteralNode)
+ if node.IsUnknown() {
+ // If any arguments are unknown then the result is automatically unknown
+ return UnknownValue, ast.TypeUnknown, nil
+ }
+ args[len(v.Args)-1-i] = node.Value
+ }
+
+ // Call the function
+ result, err := function.Callback(args)
+ if err != nil {
+ return nil, ast.TypeInvalid, fmt.Errorf("%s: %s", v.Func, err)
+ }
+
+ return result, function.ReturnType, nil
+}
+
+type evalConditional struct{ *ast.Conditional }
+
+func (v *evalConditional) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+ // On the stack we have literal nodes representing the resulting values
+ // of the condition, true and false expressions, but they are in reverse
+ // order.
+ falseLit := stack.Pop().(*ast.LiteralNode)
+ trueLit := stack.Pop().(*ast.LiteralNode)
+ condLit := stack.Pop().(*ast.LiteralNode)
+
+ if condLit.IsUnknown() {
+ // If our conditional is unknown then our result is also unknown
+ return UnknownValue, ast.TypeUnknown, nil
+ }
+
+ if condLit.Value.(bool) {
+ return trueLit.Value, trueLit.Typex, nil
+ }
+ return falseLit.Value, falseLit.Typex, nil
+}
+
+type evalIndex struct{ *ast.Index }
+
+func (v *evalIndex) Eval(scope ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+ key := stack.Pop().(*ast.LiteralNode)
+ target := stack.Pop().(*ast.LiteralNode)
+
+ variableName := v.Index.Target.(*ast.VariableAccess).Name
+
+ if key.IsUnknown() {
+ // If our key is unknown then our result is also unknown
+ return UnknownValue, ast.TypeUnknown, nil
+ }
+
+ // For target, we'll accept collections containing unknown values but
+ // we still need to catch when the collection itself is unknown, shallowly.
+ if target.Typex == ast.TypeUnknown {
+ return UnknownValue, ast.TypeUnknown, nil
+ }
+
+ switch target.Typex {
+ case ast.TypeList:
+ return v.evalListIndex(variableName, target.Value, key.Value)
+ case ast.TypeMap:
+ return v.evalMapIndex(variableName, target.Value, key.Value)
+ default:
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "target %q for indexing must be ast.TypeList or ast.TypeMap, is %s",
+ variableName, target.Typex)
+ }
+}
+
+func (v *evalIndex) evalListIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) {
+ // We assume type checking was already done and we can assume that target
+ // is a list and key is an int
+ list, ok := target.([]ast.Variable)
+ if !ok {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "cannot cast target to []Variable, is: %T", target)
+ }
+
+ keyInt, ok := key.(int)
+ if !ok {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "cannot cast key to int, is: %T", key)
+ }
+
+ if len(list) == 0 {
+ return nil, ast.TypeInvalid, fmt.Errorf("list is empty")
+ }
+
+ if keyInt < 0 || len(list) < keyInt+1 {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "index %d out of range for list %s (max %d)",
+ keyInt, variableName, len(list))
+ }
+
+ returnVal := list[keyInt].Value
+ returnType := list[keyInt].Type
+ return returnVal, returnType, nil
+}
+
+func (v *evalIndex) evalMapIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) {
+ // We assume type checking was already done and we can assume that target
+ // is a map and key is a string
+ vmap, ok := target.(map[string]ast.Variable)
+ if !ok {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "cannot cast target to map[string]Variable, is: %T", target)
+ }
+
+ keyString, ok := key.(string)
+ if !ok {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "cannot cast key to string, is: %T", key)
+ }
+
+ if len(vmap) == 0 {
+ return nil, ast.TypeInvalid, fmt.Errorf("map is empty")
+ }
+
+ value, ok := vmap[keyString]
+ if !ok {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "key %q does not exist in map %s", keyString, variableName)
+ }
+
+ return value.Value, value.Type, nil
+}
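+
+// A hedged example: with var.names bound to the two-element list
+// ["a", "b"], ${var.names[1]} evaluates to "b", while ${var.names[2]}
+// fails with the out-of-range error above, naming var.names.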
+
+type evalOutput struct{ *ast.Output }
+
+func (v *evalOutput) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+ // The expressions should all be on the stack in reverse
+ // order. So pop them off, reverse their order, and concatenate.
+ nodes := make([]*ast.LiteralNode, 0, len(v.Exprs))
+ haveUnknown := false
+ for range v.Exprs {
+ n := stack.Pop().(*ast.LiteralNode)
+ nodes = append(nodes, n)
+
+ // If we have any unknowns then the whole result is unknown
+ // (we must deal with this first, because the type checker can
+ // skip type conversions in the presence of unknowns, and thus
+ // any of our other nodes may be incorrectly typed.)
+ if n.IsUnknown() {
+ haveUnknown = true
+ }
+ }
+
+ if haveUnknown {
+ return UnknownValue, ast.TypeUnknown, nil
+ }
+
+ // Special case the single list and map
+ if len(nodes) == 1 {
+ switch t := nodes[0].Typex; t {
+ case ast.TypeList, ast.TypeMap, ast.TypeUnknown:
+ return nodes[0].Value, t, nil
+ }
+ }
+
+ // Otherwise concatenate the strings
+ var buf bytes.Buffer
+ for i := len(nodes) - 1; i >= 0; i-- {
+ if nodes[i].Typex != ast.TypeString {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "invalid output with %s value at index %d: %#v",
+ nodes[i].Typex,
+ i,
+ nodes[i].Value,
+ )
+ }
+ buf.WriteString(nodes[i].Value.(string))
+ }
+
+ return buf.String(), ast.TypeString, nil
+}
+
+type evalLiteralNode struct{ *ast.LiteralNode }
+
+func (v *evalLiteralNode) Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) {
+ return v.Value, v.Typex, nil
+}
+
+type evalVariableAccess struct{ *ast.VariableAccess }
+
+func (v *evalVariableAccess) Eval(scope ast.Scope, _ *ast.Stack) (interface{}, ast.Type, error) {
+ // Look up the variable in the map
+ variable, ok := scope.LookupVar(v.Name)
+ if !ok {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "unknown variable accessed: %s", v.Name)
+ }
+
+ return variable.Value, variable.Type, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/eval_type.go b/vendor/github.com/hashicorp/hil/eval_type.go
new file mode 100644
index 00000000..6946ecd2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/eval_type.go
@@ -0,0 +1,16 @@
+package hil
+
+//go:generate stringer -type=EvalType eval_type.go
+
+// EvalType represents the type of the output returned from a HIL
+// evaluation.
+type EvalType uint32
+
+const (
+ TypeInvalid EvalType = 0
+ TypeString EvalType = 1 << iota
+ TypeBool
+ TypeList
+ TypeMap
+ TypeUnknown
+)
diff --git a/vendor/github.com/hashicorp/hil/evaltype_string.go b/vendor/github.com/hashicorp/hil/evaltype_string.go
new file mode 100644
index 00000000..b107ddd4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/evaltype_string.go
@@ -0,0 +1,42 @@
+// Code generated by "stringer -type=EvalType eval_type.go"; DO NOT EDIT
+
+package hil
+
+import "fmt"
+
+const (
+ _EvalType_name_0 = "TypeInvalid"
+ _EvalType_name_1 = "TypeString"
+ _EvalType_name_2 = "TypeBool"
+ _EvalType_name_3 = "TypeList"
+ _EvalType_name_4 = "TypeMap"
+ _EvalType_name_5 = "TypeUnknown"
+)
+
+var (
+ _EvalType_index_0 = [...]uint8{0, 11}
+ _EvalType_index_1 = [...]uint8{0, 10}
+ _EvalType_index_2 = [...]uint8{0, 8}
+ _EvalType_index_3 = [...]uint8{0, 8}
+ _EvalType_index_4 = [...]uint8{0, 7}
+ _EvalType_index_5 = [...]uint8{0, 11}
+)
+
+func (i EvalType) String() string {
+ switch {
+ case i == 0:
+ return _EvalType_name_0
+ case i == 2:
+ return _EvalType_name_1
+ case i == 4:
+ return _EvalType_name_2
+ case i == 8:
+ return _EvalType_name_3
+ case i == 16:
+ return _EvalType_name_4
+ case i == 32:
+ return _EvalType_name_5
+ default:
+ return fmt.Sprintf("EvalType(%d)", i)
+ }
+}
diff --git a/vendor/github.com/hashicorp/hil/parse.go b/vendor/github.com/hashicorp/hil/parse.go
new file mode 100644
index 00000000..ecbe1fdb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/parse.go
@@ -0,0 +1,29 @@
+package hil
+
+import (
+ "github.com/hashicorp/hil/ast"
+ "github.com/hashicorp/hil/parser"
+ "github.com/hashicorp/hil/scanner"
+)
+
+// Parse parses the given program and returns an executable AST tree.
+//
+// Syntax errors are returned with error having the dynamic type
+// *parser.ParseError, which gives the caller access to the source position
+// where the error was found, which allows (for example) combining it with
+// a known source filename to add context to the error message.
+func Parse(v string) (ast.Node, error) {
+ return ParseWithPosition(v, ast.Pos{Line: 1, Column: 1})
+}
+
+// ParseWithPosition is like Parse except that it overrides the source
+// row and column position of the first character in the string, which should
+// be 1-based.
+//
+// This can be used when HIL is embedded in another language and the outer
+// parser knows the row and column where the HIL expression started within
+// the overall source file.
+func ParseWithPosition(v string, pos ast.Pos) (ast.Node, error) {
+ ch := scanner.Scan(v, pos)
+ return parser.Parse(ch)
+}
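+
+// For example, an outer parser that found a HIL expression at line 12,
+// column 7 of its own file might call (a sketch; src is hypothetical):
+//
+//	node, err := hil.ParseWithPosition(src, ast.Pos{Line: 12, Column: 7})
+//	// any *parser.ParseError now reports positions in the outer
+//	// file's coordinates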
diff --git a/vendor/github.com/hashicorp/hil/parser/binary_op.go b/vendor/github.com/hashicorp/hil/parser/binary_op.go
new file mode 100644
index 00000000..2e013e01
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/parser/binary_op.go
@@ -0,0 +1,45 @@
+package parser
+
+import (
+ "github.com/hashicorp/hil/ast"
+ "github.com/hashicorp/hil/scanner"
+)
+
+var binaryOps []map[scanner.TokenType]ast.ArithmeticOp
+
+func init() {
+ // This operation table maps from the operator's scanner token type
+ // to the AST arithmetic operation. All expressions produced from
+ // binary operators are *ast.Arithmetic nodes.
+ //
+ // Binary operator groups are listed in order of precedence, with
+ // the *lowest* precedence first. Operators within the same group
+ // have left-to-right associativity.
+ binaryOps = []map[scanner.TokenType]ast.ArithmeticOp{
+ {
+ scanner.OR: ast.ArithmeticOpLogicalOr,
+ },
+ {
+ scanner.AND: ast.ArithmeticOpLogicalAnd,
+ },
+ {
+ scanner.EQUAL: ast.ArithmeticOpEqual,
+ scanner.NOTEQUAL: ast.ArithmeticOpNotEqual,
+ },
+ {
+ scanner.GT: ast.ArithmeticOpGreaterThan,
+ scanner.GTE: ast.ArithmeticOpGreaterThanOrEqual,
+ scanner.LT: ast.ArithmeticOpLessThan,
+ scanner.LTE: ast.ArithmeticOpLessThanOrEqual,
+ },
+ {
+ scanner.PLUS: ast.ArithmeticOpAdd,
+ scanner.MINUS: ast.ArithmeticOpSub,
+ },
+ {
+ scanner.STAR: ast.ArithmeticOpMul,
+ scanner.SLASH: ast.ArithmeticOpDiv,
+ scanner.PERCENT: ast.ArithmeticOpMod,
+ },
+ }
+}
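+
+// Hedged illustrations of the grouping: PLUS sits in an earlier (looser)
+// group than STAR, and same-group operators combine left-to-right, so:
+//
+//	${1 + 2 * 3}    parses as 1 + (2 * 3)
+//	${8 - 2 - 1}    parses as (8 - 2) - 1
+//	${a || b && c}  parses as a || (b && c), since OR binds loosest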
diff --git a/vendor/github.com/hashicorp/hil/parser/error.go b/vendor/github.com/hashicorp/hil/parser/error.go
new file mode 100644
index 00000000..bacd6964
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/parser/error.go
@@ -0,0 +1,38 @@
+package parser
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hil/ast"
+ "github.com/hashicorp/hil/scanner"
+)
+
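+// ParseError is the error type returned for syntax errors; it records the
+// source position at which the problem was found.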
+type ParseError struct {
+ Message string
+ Pos ast.Pos
+}
+
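+// Errorf creates a ParseError at the given source position.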
+func Errorf(pos ast.Pos, format string, args ...interface{}) error {
+ return &ParseError{
+ Message: fmt.Sprintf(format, args...),
+ Pos: pos,
+ }
+}
+
+// TokenErrorf is a convenient wrapper around Errorf that uses the
+// position of the given token.
+func TokenErrorf(token *scanner.Token, format string, args ...interface{}) error {
+ return Errorf(token.Pos, format, args...)
+}
+
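+// ExpectationError reports that the parser wanted a particular construct
+// but found a different token.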
+func ExpectationError(wanted string, got *scanner.Token) error {
+ return TokenErrorf(got, "expected %s but found %s", wanted, got)
+}
+
+func (e *ParseError) Error() string {
+ return fmt.Sprintf("parse error at %s: %s", e.Pos, e.Message)
+}
+
+func (e *ParseError) String() string {
+ return e.Error()
+}
diff --git a/vendor/github.com/hashicorp/hil/parser/fuzz.go b/vendor/github.com/hashicorp/hil/parser/fuzz.go
new file mode 100644
index 00000000..de954f38
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/parser/fuzz.go
@@ -0,0 +1,28 @@
+// +build gofuzz
+
+package parser
+
+import (
+ "github.com/hashicorp/hil/ast"
+ "github.com/hashicorp/hil/scanner"
+)
+
+// This is a fuzz testing function designed to be used with go-fuzz:
+// https://github.com/dvyukov/go-fuzz
+//
+// It's not included in a normal build due to the gofuzz build tag above.
+//
+// There are some input files that you can use as a seed corpus for go-fuzz
+// in the directory ./fuzz-corpus .
+
+func Fuzz(data []byte) int {
+ str := string(data)
+
+ ch := scanner.Scan(str, ast.Pos{Line: 1, Column: 1})
+ _, err := Parse(ch)
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/hashicorp/hil/parser/parser.go b/vendor/github.com/hashicorp/hil/parser/parser.go
new file mode 100644
index 00000000..376f1c49
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/parser/parser.go
@@ -0,0 +1,522 @@
+package parser
+
+import (
+ "strconv"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hil/ast"
+ "github.com/hashicorp/hil/scanner"
+)
+
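+// Parse consumes tokens from the given channel, as produced by
+// scanner.Scan, and returns the resulting AST.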
+func Parse(ch <-chan *scanner.Token) (ast.Node, error) {
+ peeker := scanner.NewPeeker(ch)
+ parser := &parser{peeker}
+ output, err := parser.ParseTopLevel()
+ peeker.Close()
+ return output, err
+}
+
+type parser struct {
+ peeker *scanner.Peeker
+}
+
+func (p *parser) ParseTopLevel() (ast.Node, error) {
+ return p.parseInterpolationSeq(false)
+}
+
+func (p *parser) ParseQuoted() (ast.Node, error) {
+ return p.parseInterpolationSeq(true)
+}
+
+// parseInterpolationSeq parses either the top-level sequence of literals
+// and interpolation expressions or a similar sequence within a quoted
+// string inside an interpolation expression. The latter case is requested
+// by setting 'quoted' to true.
+func (p *parser) parseInterpolationSeq(quoted bool) (ast.Node, error) {
+ literalType := scanner.LITERAL
+ endType := scanner.EOF
+ if quoted {
+ // exceptions for quoted sequences
+ literalType = scanner.STRING
+ endType = scanner.CQUOTE
+ }
+
+ startPos := p.peeker.Peek().Pos
+
+ if quoted {
+ tok := p.peeker.Read()
+ if tok.Type != scanner.OQUOTE {
+ return nil, ExpectationError("open quote", tok)
+ }
+ }
+
+ var exprs []ast.Node
+ for {
+ tok := p.peeker.Read()
+
+ if tok.Type == endType {
+ break
+ }
+
+ switch tok.Type {
+ case literalType:
+ val, err := p.parseStringToken(tok)
+ if err != nil {
+ return nil, err
+ }
+ exprs = append(exprs, &ast.LiteralNode{
+ Value: val,
+ Typex: ast.TypeString,
+ Posx: tok.Pos,
+ })
+ case scanner.BEGIN:
+ expr, err := p.ParseInterpolation()
+ if err != nil {
+ return nil, err
+ }
+ exprs = append(exprs, expr)
+ default:
+ return nil, ExpectationError(`"${"`, tok)
+ }
+ }
+
+ if len(exprs) == 0 {
+ // If we have no parts at all then the input must've
+ // been an empty string.
+ exprs = append(exprs, &ast.LiteralNode{
+ Value: "",
+ Typex: ast.TypeString,
+ Posx: startPos,
+ })
+ }
+
+ // As a special case, if our "Output" contains only one expression
+ // and it's a literal string then we'll hoist it up to be our
+ // direct return value, so callers can easily recognize a string
+ // that has no interpolations at all.
+ if len(exprs) == 1 {
+ if lit, ok := exprs[0].(*ast.LiteralNode); ok {
+ if lit.Typex == ast.TypeString {
+ return lit, nil
+ }
+ }
+ }
+
+ return &ast.Output{
+ Exprs: exprs,
+ Posx: startPos,
+ }, nil
+}
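+
+// Consequences of the hoisting above, via the exported hil.Parse:
+//
+//	hil.Parse("hello")    returns a *ast.LiteralNode of ast.TypeString
+//	hil.Parse("hi ${x}")  returns an *ast.Output whose Exprs hold a
+//	                      string literal and a variable access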
+
+// parseStringToken takes a token of either LITERAL or STRING type and
+// returns the interpreted string, after processing any relevant
+// escape sequences.
+func (p *parser) parseStringToken(tok *scanner.Token) (string, error) {
+ var backslashes bool
+ switch tok.Type {
+ case scanner.LITERAL:
+ backslashes = false
+ case scanner.STRING:
+ backslashes = true
+ default:
+ panic("unsupported string token type")
+ }
+
+ raw := []byte(tok.Content)
+ buf := make([]byte, 0, len(raw))
+
+ for i := 0; i < len(raw); i++ {
+ b := raw[i]
+ more := len(raw) > (i + 1)
+
+ if b == '$' {
+ if more && raw[i+1] == '$' {
+ // skip over the second dollar sign
+ i++
+ }
+ } else if backslashes && b == '\\' {
+ if !more {
+ return "", Errorf(
+ ast.Pos{
+ Column: tok.Pos.Column + utf8.RuneCount(raw[:i]),
+ Line: tok.Pos.Line,
+ },
+ `unfinished backslash escape sequence`,
+ )
+ }
+ escapeType := raw[i+1]
+ switch escapeType {
+ case '\\':
+ // skip over the second slash
+ i++
+ case 'n':
+ b = '\n'
+ i++
+ case '"':
+ b = '"'
+ i++
+ default:
+ return "", Errorf(
+ ast.Pos{
+ Column: tok.Pos.Column + utf8.RuneCount(raw[:i]),
+ Line: tok.Pos.Line,
+ },
+ `invalid backslash escape sequence`,
+ )
+ }
+ }
+
+ buf = append(buf, b)
+ }
+
+ return string(buf), nil
+}
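+
+// Worked examples of the escape handling above:
+//
+//	LITERAL `cost: $$5`   becomes `cost: $5` ($$ collapses in both kinds)
+//	STRING  `a\nb`        becomes "a", a newline, then "b"
+//	STRING  `say \"hi\"`  becomes `say "hi"`
+//	STRING  `oops\q`      is rejected: invalid backslash escape sequence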
+
+func (p *parser) ParseInterpolation() (ast.Node, error) {
+ // By the time we're called, we're already "inside" the ${ sequence
+ // because the caller consumed the ${ token.
+
+ expr, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+
+ err = p.requireTokenType(scanner.END, `"}"`)
+ if err != nil {
+ return nil, err
+ }
+
+ return expr, nil
+}
+
+func (p *parser) ParseExpression() (ast.Node, error) {
+ return p.parseTernaryCond()
+}
+
+func (p *parser) parseTernaryCond() (ast.Node, error) {
+ // The ternary condition operator (.. ? .. : ..) behaves somewhat
+ // like a binary operator except that the "operator" is itself
+ // an expression enclosed in two punctuation characters.
+ // The middle expression is parsed as if the ? and : symbols
+ // were parentheses. The "rhs" (the "false expression") is then
+ // treated right-associatively so it behaves similarly to the
+ // middle in terms of precedence.
+
+ startPos := p.peeker.Peek().Pos
+
+ var cond, trueExpr, falseExpr ast.Node
+ var err error
+
+ cond, err = p.parseBinaryOps(binaryOps)
+ if err != nil {
+ return nil, err
+ }
+
+ next := p.peeker.Peek()
+ if next.Type != scanner.QUESTION {
+ return cond, nil
+ }
+
+ p.peeker.Read() // eat question mark
+
+ trueExpr, err = p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+
+ colon := p.peeker.Read()
+ if colon.Type != scanner.COLON {
+ return nil, ExpectationError(":", colon)
+ }
+
+ falseExpr, err = p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+
+ return &ast.Conditional{
+ CondExpr: cond,
+ TrueExpr: trueExpr,
+ FalseExpr: falseExpr,
+ Posx: startPos,
+ }, nil
+}
+
+// parseBinaryOps calls itself recursively to work through all of the
+// operator precedence groups, and then eventually calls ParseExpressionTerm
+// for each operand.
+func (p *parser) parseBinaryOps(ops []map[scanner.TokenType]ast.ArithmeticOp) (ast.Node, error) {
+ if len(ops) == 0 {
+ // We've run out of operators, so now we'll just try to parse a term.
+ return p.ParseExpressionTerm()
+ }
+
+ thisLevel := ops[0]
+ remaining := ops[1:]
+
+ startPos := p.peeker.Peek().Pos
+
+ var lhs, rhs ast.Node
+ operator := ast.ArithmeticOpInvalid
+ var err error
+
+ // parse a term that might be the first operand of a binary
+ // expression or it might just be a standalone term, but
+ // we won't know until we've parsed it and can look ahead
+ // to see if there's an operator token.
+ lhs, err = p.parseBinaryOps(remaining)
+ if err != nil {
+ return nil, err
+ }
+
+ // We'll keep eating up arithmetic operators until we run
+ // out, so that operators with the same precedence will combine in a
+ // left-associative manner:
+ // a+b+c => (a+b)+c, not a+(b+c)
+ //
+ // Should we later want to have right-associative operators, a way
+ // to achieve that would be to call back up to ParseExpression here
+ // instead of iteratively parsing only the remaining operators.
+ for {
+ next := p.peeker.Peek()
+ var newOperator ast.ArithmeticOp
+ var ok bool
+ if newOperator, ok = thisLevel[next.Type]; !ok {
+ break
+ }
+
+ // Are we extending an expression started on
+ // the previous iteration?
+ if operator != ast.ArithmeticOpInvalid {
+ lhs = &ast.Arithmetic{
+ Op: operator,
+ Exprs: []ast.Node{lhs, rhs},
+ Posx: startPos,
+ }
+ }
+
+ operator = newOperator
+ p.peeker.Read() // eat operator token
+ rhs, err = p.parseBinaryOps(remaining)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if operator != ast.ArithmeticOpInvalid {
+ return &ast.Arithmetic{
+ Op: operator,
+ Exprs: []ast.Node{lhs, rhs},
+ Posx: startPos,
+ }, nil
+ }
+
+ return lhs, nil
+}
+
+func (p *parser) ParseExpressionTerm() (ast.Node, error) {
+
+ next := p.peeker.Peek()
+
+ switch next.Type {
+
+ case scanner.OPAREN:
+ p.peeker.Read()
+ expr, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ err = p.requireTokenType(scanner.CPAREN, `")"`)
+ return expr, err
+
+ case scanner.OQUOTE:
+ return p.ParseQuoted()
+
+ case scanner.INTEGER:
+ tok := p.peeker.Read()
+ val, err := strconv.Atoi(tok.Content)
+ if err != nil {
+ return nil, TokenErrorf(tok, "invalid integer: %s", err)
+ }
+ return &ast.LiteralNode{
+ Value: val,
+ Typex: ast.TypeInt,
+ Posx: tok.Pos,
+ }, nil
+
+ case scanner.FLOAT:
+ tok := p.peeker.Read()
+ val, err := strconv.ParseFloat(tok.Content, 64)
+ if err != nil {
+ return nil, TokenErrorf(tok, "invalid float: %s", err)
+ }
+ return &ast.LiteralNode{
+ Value: val,
+ Typex: ast.TypeFloat,
+ Posx: tok.Pos,
+ }, nil
+
+ case scanner.BOOL:
+ tok := p.peeker.Read()
+ // the scanner guarantees that tok.Content is either "true" or "false"
+ val := tok.Content[0] == 't'
+ return &ast.LiteralNode{
+ Value: val,
+ Typex: ast.TypeBool,
+ Posx: tok.Pos,
+ }, nil
+
+ case scanner.MINUS:
+ opTok := p.peeker.Read()
+ // important to use ParseExpressionTerm rather than ParseExpression
+ // here, otherwise we can capture a following binary expression into
+ // our negation.
+ // e.g. -46+5 should parse as (0-46)+5, not 0-(46+5)
+ operand, err := p.ParseExpressionTerm()
+ if err != nil {
+ return nil, err
+ }
+ // The AST currently represents negative numbers as
+ // a binary subtraction of the number from zero.
+ return &ast.Arithmetic{
+ Op: ast.ArithmeticOpSub,
+ Exprs: []ast.Node{
+ &ast.LiteralNode{
+ Value: 0,
+ Typex: ast.TypeInt,
+ Posx: opTok.Pos,
+ },
+ operand,
+ },
+ Posx: opTok.Pos,
+ }, nil
+
+ case scanner.BANG:
+ opTok := p.peeker.Read()
+ // important to use ParseExpressionTerm rather than ParseExpression
+ // here, otherwise we can capture a following binary expression into
+ // our negation.
+ operand, err := p.ParseExpressionTerm()
+ if err != nil {
+ return nil, err
+ }
+ // The AST currently represents boolean negation as an equality
+ // test with "false".
+ return &ast.Arithmetic{
+ Op: ast.ArithmeticOpEqual,
+ Exprs: []ast.Node{
+ &ast.LiteralNode{
+ Value: false,
+ Typex: ast.TypeBool,
+ Posx: opTok.Pos,
+ },
+ operand,
+ },
+ Posx: opTok.Pos,
+ }, nil
+
+ case scanner.IDENTIFIER:
+ return p.ParseScopeInteraction()
+
+ default:
+ return nil, ExpectationError("expression", next)
+ }
+}
+
+// ParseScopeInteraction parses the expression types that interact
+// with the evaluation scope: variable access, function calls, and
+// indexing.
+//
+// Indexing should actually be a distinct operator in its own right,
+// so that e.g. it can be applied to the result of a function call,
+// but for now we're preserving the behavior of the older yacc-based
+// parser.
+func (p *parser) ParseScopeInteraction() (ast.Node, error) {
+ first := p.peeker.Read()
+ startPos := first.Pos
+ if first.Type != scanner.IDENTIFIER {
+ return nil, ExpectationError("identifier", first)
+ }
+
+ next := p.peeker.Peek()
+ if next.Type == scanner.OPAREN {
+ // function call
+ funcName := first.Content
+ p.peeker.Read() // eat paren
+ var args []ast.Node
+
+ for {
+ if p.peeker.Peek().Type == scanner.CPAREN {
+ break
+ }
+
+ arg, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+
+ args = append(args, arg)
+
+ if p.peeker.Peek().Type == scanner.COMMA {
+ p.peeker.Read() // eat comma
+ continue
+ } else {
+ break
+ }
+ }
+
+ err := p.requireTokenType(scanner.CPAREN, `")"`)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ast.Call{
+ Func: funcName,
+ Args: args,
+ Posx: startPos,
+ }, nil
+ }
+
+ varNode := &ast.VariableAccess{
+ Name: first.Content,
+ Posx: startPos,
+ }
+
+ if p.peeker.Peek().Type == scanner.OBRACKET {
+ // index operator
+ startPos := p.peeker.Read().Pos // eat bracket
+ indexExpr, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ err = p.requireTokenType(scanner.CBRACKET, `"]"`)
+ if err != nil {
+ return nil, err
+ }
+ return &ast.Index{
+ Target: varNode,
+ Key: indexExpr,
+ Posx: startPos,
+ }, nil
+ }
+
+ return varNode, nil
+}
+
+// requireTokenType consumes the next token and returns an error if its
+// type does not match the given type. nil is returned if the type matches.
+//
+// This is a helper around peeker.Read() for situations where the parser just
+// wants to assert that a particular token type must be present.
+func (p *parser) requireTokenType(wantType scanner.TokenType, wantName string) error {
+ token := p.peeker.Read()
+ if token.Type != wantType {
+ return ExpectationError(wantName, token)
+ }
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/hil/scanner/peeker.go b/vendor/github.com/hashicorp/hil/scanner/peeker.go
new file mode 100644
index 00000000..4de37283
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/peeker.go
@@ -0,0 +1,55 @@
+package scanner
+
+// Peeker wraps a token channel returned by Scan, giving a caller (e.g.
+// the parser) one token of lookahead along with some utilities for more
+// convenient processing of the stream.
+type Peeker struct {
+ ch <-chan *Token
+ peeked *Token
+}
+
+func NewPeeker(ch <-chan *Token) *Peeker {
+ return &Peeker{
+ ch: ch,
+ }
+}
+
+// Peek returns the next token in the stream without consuming it. A
+// subsequent call to Read will return the same token.
+func (p *Peeker) Peek() *Token {
+ if p.peeked == nil {
+ p.peeked = <-p.ch
+ }
+ return p.peeked
+}
+
+// Read consumes the next token in the stream and returns it.
+func (p *Peeker) Read() *Token {
+ token := p.Peek()
+
+ // As a special case, we will produce the EOF token forever once
+ // it is reached.
+ if token.Type != EOF {
+ p.peeked = nil
+ }
+
+ return token
+}
+
+// Close ensures that the token stream has been exhausted, to prevent
+// the goroutine in the underlying scanner from leaking.
+//
+// It's not necessary to call this if the caller reads the token stream
+// to EOF, since that implicitly closes the scanner.
+func (p *Peeker) Close() {
+ for range p.ch {
+ // discard
+ }
+ // Install a synthetic EOF token in 'peeked' in case someone
+ // erroneously calls Peek() or Read() after we've closed.
+ p.peeked = &Token{
+ Type: EOF,
+ Content: "",
+ }
+}
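+
+// A usage sketch (assuming an import of hil/ast for the start position):
+//
+//	p := NewPeeker(Scan("${foo}", ast.Pos{Line: 1, Column: 1}))
+//	if p.Peek().Type == BEGIN { // look ahead without consuming
+//		tok := p.Read() // now actually consume the "${" token
+//		_ = tok
+//	}
+//	p.Close() // drain the stream so the scanner goroutine exits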
diff --git a/vendor/github.com/hashicorp/hil/scanner/scanner.go b/vendor/github.com/hashicorp/hil/scanner/scanner.go
new file mode 100644
index 00000000..bab86c67
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/scanner.go
@@ -0,0 +1,550 @@
+package scanner
+
+import (
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+// Scan returns a channel that receives Tokens from the given input string.
+//
+// The scanner's job is just to partition the string into meaningful parts.
+// It doesn't do any transformation of the raw input string, so the caller
+// must deal with any further interpretation required, such as parsing INTEGER
+// tokens into real ints, or dealing with escape sequences in LITERAL or
+// STRING tokens.
+//
+// Strings in the returned tokens are slices from the original string.
+//
+// startPos should be set to ast.InitPos unless the caller knows that
+// this interpolation string is part of a larger file and knows the position
+// of the first character in that larger file.
+func Scan(s string, startPos ast.Pos) <-chan *Token {
+ ch := make(chan *Token)
+ go scan(s, ch, startPos)
+ return ch
+}
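+
+// An in-package usage sketch; the channel is closed after EOF, so a plain
+// range loop terminates:
+//
+//	ch := Scan("hello ${name}!", ast.Pos{Line: 1, Column: 1})
+//	for tok := range ch {
+//		fmt.Printf("%v %q\n", tok.Type, tok.Content)
+//	}
+//	// yields roughly: LITERAL "hello ", BEGIN "${", IDENTIFIER "name",
+//	// END "}", LITERAL "!", EOF ""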
+
+func scan(s string, ch chan<- *Token, pos ast.Pos) {
+ // 'remain' starts off as the whole string but we gradually
+ // slice off the front of it as we work our way through.
+ remain := s
+
+ // nesting keeps track of how many ${ .. } sequences we are
+ // inside, so we can recognize the minor differences in syntax
+ // between outer string literals (LITERAL tokens) and quoted
+ // string literals (STRING tokens).
+ nesting := 0
+
+ // We're going to flip back and forth between parsing literals/strings
+ // and parsing interpolation sequences ${ .. } until we reach EOF or
+ // some INVALID token.
+All:
+ for {
+ startPos := pos
+ // Literal string processing first, since the beginning of
+ // a string is always outside of an interpolation sequence.
+ literalVal, terminator := scanLiteral(remain, pos, nesting > 0)
+
+ if len(literalVal) > 0 {
+ litType := LITERAL
+ if nesting > 0 {
+ litType = STRING
+ }
+ ch <- &Token{
+ Type: litType,
+ Content: literalVal,
+ Pos: startPos,
+ }
+ remain = remain[len(literalVal):]
+ }
+
+ ch <- terminator
+ remain = remain[len(terminator.Content):]
+ pos = terminator.Pos
+ // Safe to use len() here because none of the terminator tokens
+ // can contain UTF-8 sequences.
+ pos.Column = pos.Column + len(terminator.Content)
+
+ switch terminator.Type {
+ case INVALID:
+ // Synthetic EOF after invalid token, since further scanning
+ // is likely to just produce more garbage.
+ ch <- &Token{
+ Type: EOF,
+ Content: "",
+ Pos: pos,
+ }
+ break All
+ case EOF:
+ // All done!
+ break All
+ case BEGIN:
+ nesting++
+ case CQUOTE:
+ // nothing special to do
+ default:
+ // Should never happen
+ panic("invalid string/literal terminator")
+ }
+
+ // Now we do the processing of the insides of ${ .. } sequences.
+ // This loop terminates when we encounter either a closing } or
+ // an opening ", which will cause us to return to literal processing.
+ Interpolation:
+ for {
+
+ token, size, newPos := scanInterpolationToken(remain, pos)
+ ch <- token
+ remain = remain[size:]
+ pos = newPos
+
+ switch token.Type {
+ case INVALID:
+ // Synthetic EOF after invalid token, since further scanning
+ // is likely to just produce more garbage.
+ ch <- &Token{
+ Type: EOF,
+ Content: "",
+ Pos: pos,
+ }
+ break All
+ case EOF:
+ // All done
+ // (though a syntax error that we'll catch in the parser)
+ break All
+ case END:
+ nesting--
+ if nesting < 0 {
+ // Can happen if there are unbalanced ${ and } sequences
+ // in the input, which we'll catch in the parser.
+ nesting = 0
+ }
+ break Interpolation
+ case OQUOTE:
+ // Beginning of nested quoted string
+ break Interpolation
+ }
+ }
+ }
+
+ close(ch)
+}
+
+// Returns the token found at the start of the given string, followed by
+// the number of bytes that were consumed from the string and the adjusted
+// source position.
+//
+// Note that the number of bytes consumed can be more than the length of
+// the returned token contents if the string begins with whitespace, since
+// it will be silently consumed before reading the token.
+func scanInterpolationToken(s string, startPos ast.Pos) (*Token, int, ast.Pos) {
+ pos := startPos
+ size := 0
+
+ // Consume whitespace, if any
+ for len(s) > 0 && byteIsSpace(s[0]) {
+ if s[0] == '\n' {
+ pos.Column = 1
+ pos.Line++
+ } else {
+ pos.Column++
+ }
+ size++
+ s = s[1:]
+ }
+
+ // Unexpected EOF during sequence
+ if len(s) == 0 {
+ return &Token{
+ Type: EOF,
+ Content: "",
+ Pos: pos,
+ }, size, pos
+ }
+
+ next := s[0]
+ var token *Token
+
+ switch next {
+ case '(', ')', '[', ']', ',', '.', '+', '-', '*', '/', '%', '?', ':':
+ // Easy punctuation symbols that don't have any special meaning
+ // during scanning, and that stand for themselves in the
+ // TokenType enumeration.
+ token = &Token{
+ Type: TokenType(next),
+ Content: s[:1],
+ Pos: pos,
+ }
+ case '}':
+ token = &Token{
+ Type: END,
+ Content: s[:1],
+ Pos: pos,
+ }
+ case '"':
+ token = &Token{
+ Type: OQUOTE,
+ Content: s[:1],
+ Pos: pos,
+ }
+ case '!':
+ if len(s) >= 2 && s[:2] == "!=" {
+ token = &Token{
+ Type: NOTEQUAL,
+ Content: s[:2],
+ Pos: pos,
+ }
+ } else {
+ token = &Token{
+ Type: BANG,
+ Content: s[:1],
+ Pos: pos,
+ }
+ }
+ case '<':
+ if len(s) >= 2 && s[:2] == "<=" {
+ token = &Token{
+ Type: LTE,
+ Content: s[:2],
+ Pos: pos,
+ }
+ } else {
+ token = &Token{
+ Type: LT,
+ Content: s[:1],
+ Pos: pos,
+ }
+ }
+ case '>':
+ if len(s) >= 2 && s[:2] == ">=" {
+ token = &Token{
+ Type: GTE,
+ Content: s[:2],
+ Pos: pos,
+ }
+ } else {
+ token = &Token{
+ Type: GT,
+ Content: s[:1],
+ Pos: pos,
+ }
+ }
+ case '=':
+ if len(s) >= 2 && s[:2] == "==" {
+ token = &Token{
+ Type: EQUAL,
+ Content: s[:2],
+ Pos: pos,
+ }
+ } else {
+ // A single equals is not a valid operator
+ token = &Token{
+ Type: INVALID,
+ Content: s[:1],
+ Pos: pos,
+ }
+ }
+ case '&':
+ if len(s) >= 2 && s[:2] == "&&" {
+ token = &Token{
+ Type: AND,
+ Content: s[:2],
+ Pos: pos,
+ }
+ } else {
+ token = &Token{
+ Type: INVALID,
+ Content: s[:1],
+ Pos: pos,
+ }
+ }
+ case '|':
+ if len(s) >= 2 && s[:2] == "||" {
+ token = &Token{
+ Type: OR,
+ Content: s[:2],
+ Pos: pos,
+ }
+ } else {
+ token = &Token{
+ Type: INVALID,
+ Content: s[:1],
+ Pos: pos,
+ }
+ }
+ default:
+ if next >= '0' && next <= '9' {
+ num, numType := scanNumber(s)
+ token = &Token{
+ Type: numType,
+ Content: num,
+ Pos: pos,
+ }
+ } else if stringStartsWithIdentifier(s) {
+ ident, runeLen := scanIdentifier(s)
+ tokenType := IDENTIFIER
+ if ident == "true" || ident == "false" {
+ tokenType = BOOL
+ }
+ token = &Token{
+ Type: tokenType,
+ Content: ident,
+ Pos: pos,
+ }
+ // Skip usual token handling because it doesn't
+ // know how to deal with UTF-8 sequences.
+ pos.Column = pos.Column + runeLen
+ return token, size + len(ident), pos
+ } else {
+ _, byteLen := utf8.DecodeRuneInString(s)
+ token = &Token{
+ Type: INVALID,
+ Content: s[:byteLen],
+ Pos: pos,
+ }
+ // Skip usual token handling because it doesn't
+ // know how to deal with UTF-8 sequences.
+ pos.Column = pos.Column + 1
+ return token, size + byteLen, pos
+ }
+ }
+
+ // Here we assume that the token content contains no UTF-8 sequences,
+ // because we dealt with UTF-8 characters as a special case where
+ // necessary above.
+ size = size + len(token.Content)
+ pos.Column = pos.Column + len(token.Content)
+
+ return token, size, pos
+}
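+
+// For example (illustrative): scanning the input `num >= 1.5` token by
+// token yields IDENTIFIER "num", GTE ">=", and FLOAT "1.5"; the
+// whitespace before each token is folded into the consumed byte count
+// rather than into the token content.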
+
+// scanLiteral returns the (possibly-empty) prefix of the given string
+// that represents a literal, followed by the token that marks the end
+// of the literal.
+func scanLiteral(s string, startPos ast.Pos, nested bool) (string, *Token) {
+ litLen := 0
+ pos := startPos
+ var terminator *Token
+ for {
+
+ if litLen >= len(s) {
+ if nested {
+ // We've ended in the middle of a quoted string,
+ // which means this token is actually invalid.
+ return "", &Token{
+ Type: INVALID,
+ Content: s,
+ Pos: startPos,
+ }
+ }
+ terminator = &Token{
+ Type: EOF,
+ Content: "",
+ Pos: pos,
+ }
+ break
+ }
+
+ next := s[litLen]
+
+ if next == '$' && len(s) > litLen+1 {
+ follow := s[litLen+1]
+
+ if follow == '{' {
+ terminator = &Token{
+ Type: BEGIN,
+ Content: s[litLen : litLen+2],
+ Pos: pos,
+ }
+ pos.Column = pos.Column + 2
+ break
+ } else if follow == '$' {
+ // Double-$ escapes the special processing of $,
+ // so we will consume both characters here.
+ pos.Column = pos.Column + 2
+ litLen = litLen + 2
+ continue
+ }
+ }
+
+ // special handling that applies only to quoted strings
+ if nested {
+ if next == '"' {
+ terminator = &Token{
+ Type: CQUOTE,
+ Content: s[litLen : litLen+1],
+ Pos: pos,
+ }
+ pos.Column = pos.Column + 1
+ break
+ }
+
+ // Escaped quote marks do not terminate the string.
+ //
+ // All we do here in the scanner is avoid terminating a string
+ // due to an escaped quote. The parser is responsible for the
+ // full handling of escape sequences, since it's able to produce
+ // better error messages than we can produce in here.
+ if next == '\\' && len(s) > litLen+1 {
+ follow := s[litLen+1]
+
+ if follow == '"' {
+ // \" escapes the special processing of ",
+ // so we will consume both characters here.
+ pos.Column = pos.Column + 2
+ litLen = litLen + 2
+ continue
+ }
+ }
+ }
+
+ if next == '\n' {
+ pos.Column = 1
+ pos.Line++
+ litLen++
+ } else {
+ pos.Column++
+
+ // "Column" measures runes, so we need to actually consume
+ // a valid UTF-8 character here.
+ _, size := utf8.DecodeRuneInString(s[litLen:])
+ litLen = litLen + size
+ }
+
+ }
+
+ return s[:litLen], terminator
+}
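+
+// For example (illustrative): scanLiteral(`hello ${name}`, pos, false)
+// returns the literal "hello " together with a BEGIN terminator token
+// whose content is "${", leaving the caller to continue scanning inside
+// the interpolation sequence.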
+
+// scanNumber returns the extent of the prefix of the string that represents
+// a valid number, along with what type of number it represents: INT or FLOAT.
+//
+// scanNumber does only basic character analysis: numbers consist of digits
+// and periods, with at least one period signalling a FLOAT. It's the parser's
+// responsibility to validate the form and range of the number, such as ensuring
+// that a FLOAT actually contains only one period, etc.
+func scanNumber(s string) (string, TokenType) {
+ period := -1
+ byteLen := 0
+ numType := INTEGER
+ for {
+ if byteLen >= len(s) {
+ break
+ }
+
+ next := s[byteLen]
+ if next != '.' && (next < '0' || next > '9') {
+ // If our last value was a period, then we're not a float,
+ // we're just an integer that ends in a period.
+ if period == byteLen-1 {
+ byteLen--
+ numType = INTEGER
+ }
+
+ break
+ }
+
+ if next == '.' {
+ // If we've already seen a period, break out
+ if period >= 0 {
+ break
+ }
+
+ period = byteLen
+ numType = FLOAT
+ }
+
+ byteLen++
+ }
+
+ return s[:byteLen], numType
+}
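+
+// For example (illustrative): scanNumber("1.5)") returns ("1.5", FLOAT),
+// while scanNumber("42.x") backs up over the trailing period and returns
+// ("42", INTEGER), so that "42." followed by a non-digit reads as an
+// integer and a separate PERIOD token.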
+
+// scanIdentifier returns the extent of the prefix of the string that
+// represents a valid identifier, along with the length of that prefix
+// in runes.
+//
+// Identifiers may contain utf8-encoded non-Latin letters, which will
+// cause the returned "rune length" to be shorter than the byte length
+// of the returned string.
+func scanIdentifier(s string) (string, int) {
+ byteLen := 0
+ runeLen := 0
+ for {
+ if byteLen >= len(s) {
+ break
+ }
+
+ nextRune, size := utf8.DecodeRuneInString(s[byteLen:])
+ if !(nextRune == '_' ||
+ nextRune == '-' ||
+ nextRune == '.' ||
+ nextRune == '*' ||
+ unicode.IsNumber(nextRune) ||
+ unicode.IsLetter(nextRune) ||
+ unicode.IsMark(nextRune)) {
+ break
+ }
+
+ // If we reach a star, it must be between periods to be part
+ // of the same identifier.
+ if nextRune == '*' && s[byteLen-1] != '.' {
+ break
+ }
+
+ // If our previous character was a star, then the current one must
+ // be a period. Otherwise, undo that and exit.
+ if byteLen > 0 && s[byteLen-1] == '*' && nextRune != '.' {
+ byteLen--
+ if s[byteLen-1] == '.' {
+ byteLen--
+ }
+
+ break
+ }
+
+ byteLen = byteLen + size
+ runeLen = runeLen + 1
+ }
+
+ return s[:byteLen], runeLen
+}
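+
+// For example (illustrative): scanIdentifier("count.index + 1") returns
+// ("count.index", 11), stopping at the space, while a splat expression
+// such as "aws_instance.web.*.id" is consumed as a single identifier
+// because each star is flanked by periods.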
+
+// byteIsSpace implements a restrictive interpretation of spaces that includes
+// only what's valid inside interpolation sequences: spaces, tabs, carriage
+// returns, and newlines.
+func byteIsSpace(b byte) bool {
+ switch b {
+ case ' ', '\t', '\r', '\n':
+ return true
+ default:
+ return false
+ }
+}
+
+// stringStartsWithIdentifier returns true if the given string begins with
+// a character that is a legal start of an identifier: an underscore or
+// any character that Unicode considers to be a letter.
+func stringStartsWithIdentifier(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+
+ first := s[0]
+
+ // Easy ASCII cases first
+ if (first >= 'a' && first <= 'z') || (first >= 'A' && first <= 'Z') || first == '_' {
+ return true
+ }
+
+ // If our first byte begins a UTF-8 sequence then the sequence might
+ // be a unicode letter.
+ if utf8.RuneStart(first) {
+ firstRune, _ := utf8.DecodeRuneInString(s)
+ if unicode.IsLetter(firstRune) {
+ return true
+ }
+ }
+
+ return false
+}
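+
+// For example (illustrative): stringStartsWithIdentifier("λx") returns
+// true because the leading rune decodes to a Unicode letter, while
+// stringStartsWithIdentifier("1x") returns false.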
diff --git a/vendor/github.com/hashicorp/hil/scanner/token.go b/vendor/github.com/hashicorp/hil/scanner/token.go
new file mode 100644
index 00000000..b6c82ae9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/token.go
@@ -0,0 +1,105 @@
+package scanner
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+type Token struct {
+ Type TokenType
+ Content string
+ Pos ast.Pos
+}
+
+//go:generate stringer -type=TokenType
+type TokenType rune
+
+const (
+ // Raw string data outside of ${ .. } sequences
+ LITERAL TokenType = 'o'
+
+ // STRING is like a LITERAL but it's inside a quoted string
+ // within a ${ ... } sequence, and so it can contain backslash
+ // escaping.
+ STRING TokenType = 'S'
+
+ // Other Literals
+ INTEGER TokenType = 'I'
+ FLOAT TokenType = 'F'
+ BOOL TokenType = 'B'
+
+ BEGIN TokenType = '$' // actually "${"
+ END TokenType = '}'
+ OQUOTE TokenType = '“' // Opening quote of a nested quoted sequence
+ CQUOTE TokenType = '”' // Closing quote of a nested quoted sequence
+ OPAREN TokenType = '('
+ CPAREN TokenType = ')'
+ OBRACKET TokenType = '['
+ CBRACKET TokenType = ']'
+ COMMA TokenType = ','
+
+ IDENTIFIER TokenType = 'i'
+
+ PERIOD TokenType = '.'
+ PLUS TokenType = '+'
+ MINUS TokenType = '-'
+ STAR TokenType = '*'
+ SLASH TokenType = '/'
+ PERCENT TokenType = '%'
+
+ AND TokenType = '∧'
+ OR TokenType = '∨'
+ BANG TokenType = '!'
+
+ EQUAL TokenType = '='
+ NOTEQUAL TokenType = '≠'
+ GT TokenType = '>'
+ LT TokenType = '<'
+ GTE TokenType = '≥'
+ LTE TokenType = '≤'
+
+ QUESTION TokenType = '?'
+ COLON TokenType = ':'
+
+ EOF TokenType = '␄'
+
+ // Produced for sequences that cannot be understood as valid tokens
+ // e.g. due to use of unrecognized punctuation.
+ INVALID TokenType = '�'
+)
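+
+// Note that each single-character punctuation type above is defined as
+// its own ASCII rune (e.g. OPAREN == '('), which is what lets the
+// scanner construct such tokens directly via TokenType(next);
+// multi-character and synthetic tokens use distinctive non-ASCII runes.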
+
+func (t *Token) String() string {
+ switch t.Type {
+ case EOF:
+ return "end of string"
+ case INVALID:
+ return fmt.Sprintf("invalid sequence %q", t.Content)
+ case INTEGER:
+ return fmt.Sprintf("integer %s", t.Content)
+ case FLOAT:
+ return fmt.Sprintf("float %s", t.Content)
+ case STRING:
+ return fmt.Sprintf("string %q", t.Content)
+ case LITERAL:
+ return fmt.Sprintf("literal %q", t.Content)
+ case OQUOTE:
+ return "opening quote"
+ case CQUOTE:
+ return "closing quote"
+ case AND:
+ return "&&"
+ case OR:
+ return "||"
+ case NOTEQUAL:
+ return "!="
+ case GTE:
+ return ">="
+ case LTE:
+ return "<="
+ default:
+ // The remaining token types have content that
+ // speaks for itself.
+ return fmt.Sprintf("%q", t.Content)
+ }
+}
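+
+// For example (illustrative):
+//
+// (&Token{Type: INTEGER, Content: "42"}).String() // "integer 42"
+// (&Token{Type: PLUS, Content: "+"}).String() // `"+"` via the default case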
diff --git a/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go b/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go
new file mode 100644
index 00000000..a602f5fd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go
@@ -0,0 +1,51 @@
+// Code generated by "stringer -type=TokenType"; DO NOT EDIT
+
+package scanner
+
+import "fmt"
+
+const _TokenType_name = "BANGBEGINPERCENTOPARENCPARENSTARPLUSCOMMAMINUSPERIODSLASHCOLONLTEQUALGTQUESTIONBOOLFLOATINTEGERSTRINGOBRACKETCBRACKETIDENTIFIERLITERALENDOQUOTECQUOTEANDORNOTEQUALLTEGTEEOFINVALID"
+
+var _TokenType_map = map[TokenType]string{
+ 33: _TokenType_name[0:4],
+ 36: _TokenType_name[4:9],
+ 37: _TokenType_name[9:16],
+ 40: _TokenType_name[16:22],
+ 41: _TokenType_name[22:28],
+ 42: _TokenType_name[28:32],
+ 43: _TokenType_name[32:36],
+ 44: _TokenType_name[36:41],
+ 45: _TokenType_name[41:46],
+ 46: _TokenType_name[46:52],
+ 47: _TokenType_name[52:57],
+ 58: _TokenType_name[57:62],
+ 60: _TokenType_name[62:64],
+ 61: _TokenType_name[64:69],
+ 62: _TokenType_name[69:71],
+ 63: _TokenType_name[71:79],
+ 66: _TokenType_name[79:83],
+ 70: _TokenType_name[83:88],
+ 73: _TokenType_name[88:95],
+ 83: _TokenType_name[95:101],
+ 91: _TokenType_name[101:109],
+ 93: _TokenType_name[109:117],
+ 105: _TokenType_name[117:127],
+ 111: _TokenType_name[127:134],
+ 125: _TokenType_name[134:137],
+ 8220: _TokenType_name[137:143],
+ 8221: _TokenType_name[143:149],
+ 8743: _TokenType_name[149:152],
+ 8744: _TokenType_name[152:154],
+ 8800: _TokenType_name[154:162],
+ 8804: _TokenType_name[162:165],
+ 8805: _TokenType_name[165:168],
+ 9220: _TokenType_name[168:171],
+ 65533: _TokenType_name[171:178],
+}
+
+func (i TokenType) String() string {
+ if str, ok := _TokenType_map[i]; ok {
+ return str
+ }
+ return fmt.Sprintf("TokenType(%d)", i)
+}
diff --git a/vendor/github.com/hashicorp/hil/transform_fixed.go b/vendor/github.com/hashicorp/hil/transform_fixed.go
new file mode 100644
index 00000000..e69df294
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/transform_fixed.go
@@ -0,0 +1,29 @@
+package hil
+
+import (
+ "github.com/hashicorp/hil/ast"
+)
+
+// FixedValueTransform transforms an AST to return a fixed value for
+// all interpolations. i.e. you can make "hi ${anything}" always
+// turn into "hi foo".
+//
+// The primary use case for this is for config validations where you can
+// verify that interpolations result in a certain type of string.
+func FixedValueTransform(root ast.Node, value *ast.LiteralNode) ast.Node {
+ // We visit the nodes in top-down order
+ result := root
+ switch n := result.(type) {
+ case *ast.Output:
+ for i, v := range n.Exprs {
+ n.Exprs[i] = FixedValueTransform(v, value)
+ }
+ case *ast.LiteralNode:
+ // We keep it as-is
+ default:
+ // Anything else we replace
+ result = value
+ }
+
+ return result
+}
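+
+// A minimal usage sketch (illustrative, assuming a previously parsed
+// root node):
+//
+// fixed := FixedValueTransform(root, &ast.LiteralNode{
+// Value: "foo",
+// Typex: ast.TypeString,
+// })
+//
+// Every non-literal expression under root becomes the "foo" literal,
+// while existing literals are left untouched.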
diff --git a/vendor/github.com/hashicorp/hil/walk.go b/vendor/github.com/hashicorp/hil/walk.go
new file mode 100644
index 00000000..0ace8306
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/walk.go
@@ -0,0 +1,266 @@
+package hil
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/hashicorp/hil/ast"
+ "github.com/mitchellh/reflectwalk"
+)
+
+// WalkFn is the type of function to pass to Walk. Modify fields within
+// WalkData to control whether replacement happens.
+type WalkFn func(*WalkData) error
+
+// WalkData is the structure passed to the callback of the Walk function.
+//
+// This structure contains data passed in as well as fields that are expected
+// to be written by the caller as a result. Please see the documentation for
+// each field for more information.
+type WalkData struct {
+ // Root is the parsed root of this HIL program
+ Root ast.Node
+
+ // Location is the location within the structure where this
+ // value was found. This can be used to modify behavior within
+ // slices and so on.
+ Location reflectwalk.Location
+
+ // The below two values must be set by the callback to have any effect.
+ //
+ // Replace, if true, will replace the value in the structure with
+ // ReplaceValue. It is up to the caller to make sure this is a string.
+ Replace bool
+ ReplaceValue string
+}
+
+// Walk will walk an arbitrary Go structure and parse any string as an
+// HIL program and call the callback cb to determine what to replace it
+// with.
+//
+// This function is very useful for arbitrary HIL program interpolation
+// across a complex configuration structure. Due to the heavy use of
+// reflection in this function, it is recommended to write many unit tests
+// with your typical configuration structures to help mitigate the risk
+// of panics.
+func Walk(v interface{}, cb WalkFn) error {
+ walker := &interpolationWalker{F: cb}
+ return reflectwalk.Walk(v, walker)
+}
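+
+// A minimal usage sketch (illustrative): replace every interpolated
+// string found in a config structure with a fixed value:
+//
+// cfg := map[string]interface{}{"greeting": "hi ${var.name}"}
+// err := Walk(&cfg, func(d *WalkData) error {
+// d.Replace = true
+// d.ReplaceValue = "hi world"
+// return nil
+// })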
+
+// interpolationWalker implements interfaces for the reflectwalk package
+// (github.com/mitchellh/reflectwalk) that can be used to automatically
+// execute a callback for an interpolation.
+type interpolationWalker struct {
+ F WalkFn
+
+ key []string
+ lastValue reflect.Value
+ loc reflectwalk.Location
+ cs []reflect.Value
+ csKey []reflect.Value
+ csData interface{}
+ sliceIndex int
+ unknownKeys []string
+}
+
+func (w *interpolationWalker) Enter(loc reflectwalk.Location) error {
+ w.loc = loc
+ return nil
+}
+
+func (w *interpolationWalker) Exit(loc reflectwalk.Location) error {
+ w.loc = reflectwalk.None
+
+ switch loc {
+ case reflectwalk.Map:
+ w.cs = w.cs[:len(w.cs)-1]
+ case reflectwalk.MapValue:
+ w.key = w.key[:len(w.key)-1]
+ w.csKey = w.csKey[:len(w.csKey)-1]
+ case reflectwalk.Slice:
+ // Split any values that need to be split
+ w.splitSlice()
+ w.cs = w.cs[:len(w.cs)-1]
+ case reflectwalk.SliceElem:
+ w.csKey = w.csKey[:len(w.csKey)-1]
+ }
+
+ return nil
+}
+
+func (w *interpolationWalker) Map(m reflect.Value) error {
+ w.cs = append(w.cs, m)
+ return nil
+}
+
+func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error {
+ w.csData = k
+ w.csKey = append(w.csKey, k)
+ w.key = append(w.key, k.String())
+ w.lastValue = v
+ return nil
+}
+
+func (w *interpolationWalker) Slice(s reflect.Value) error {
+ w.cs = append(w.cs, s)
+ return nil
+}
+
+func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error {
+ w.csKey = append(w.csKey, reflect.ValueOf(i))
+ w.sliceIndex = i
+ return nil
+}
+
+func (w *interpolationWalker) Primitive(v reflect.Value) error {
+ setV := v
+
+ // We only care about strings
+ if v.Kind() == reflect.Interface {
+ setV = v
+ v = v.Elem()
+ }
+ if v.Kind() != reflect.String {
+ return nil
+ }
+
+ astRoot, err := Parse(v.String())
+ if err != nil {
+ return err
+ }
+
+ // If the AST we got is just a literal string value with the same
+ // value then we ignore it. We have to check if it's the same value
+ // because it is possible to input a string, get out a string, and
+ // have it be different. For example: "foo-$${bar}" turns into
+ // "foo-${bar}"
+ if n, ok := astRoot.(*ast.LiteralNode); ok {
+ if s, ok := n.Value.(string); ok && s == v.String() {
+ return nil
+ }
+ }
+
+ if w.F == nil {
+ return nil
+ }
+
+ data := WalkData{Root: astRoot, Location: w.loc}
+ if err := w.F(&data); err != nil {
+ return fmt.Errorf(
+ "%s in:\n\n%s",
+ err, v.String())
+ }
+
+ if data.Replace {
+ /*
+ if remove {
+ w.removeCurrent()
+ return nil
+ }
+ */
+
+ resultVal := reflect.ValueOf(data.ReplaceValue)
+ switch w.loc {
+ case reflectwalk.MapKey:
+ m := w.cs[len(w.cs)-1]
+
+ // Delete the old value
+ var zero reflect.Value
+ m.SetMapIndex(w.csData.(reflect.Value), zero)
+
+ // Set the new key with the existing value
+ m.SetMapIndex(resultVal, w.lastValue)
+
+ // Set the key to be the new key
+ w.csData = resultVal
+ case reflectwalk.MapValue:
+ // If we're in a map, then the only way to set a map value is
+ // to set it directly.
+ m := w.cs[len(w.cs)-1]
+ mk := w.csData.(reflect.Value)
+ m.SetMapIndex(mk, resultVal)
+ default:
+ // Otherwise, we should be addressable
+ setV.Set(resultVal)
+ }
+ }
+
+ return nil
+}
+
+func (w *interpolationWalker) removeCurrent() {
+ // Append the key to the unknown keys
+ w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, "."))
+
+ for i := 1; i <= len(w.cs); i++ {
+ c := w.cs[len(w.cs)-i]
+ switch c.Kind() {
+ case reflect.Map:
+ // Zero value so that we delete the map key
+ var val reflect.Value
+
+ // Get the key and delete it
+ k := w.csData.(reflect.Value)
+ c.SetMapIndex(k, val)
+ return
+ }
+ }
+
+ panic("No container found for removeCurrent")
+}
+
+func (w *interpolationWalker) replaceCurrent(v reflect.Value) {
+ c := w.cs[len(w.cs)-2]
+ switch c.Kind() {
+ case reflect.Map:
+ // Get the key and delete it
+ k := w.csKey[len(w.csKey)-1]
+ c.SetMapIndex(k, v)
+ }
+}
+
+func (w *interpolationWalker) splitSlice() {
+ // Get the []interface{} slice so we can do some operations on
+ // it without dealing with reflection. We'll document each step
+ // here to be clear.
+ var s []interface{}
+ raw := w.cs[len(w.cs)-1]
+ switch v := raw.Interface().(type) {
+ case []interface{}:
+ s = v
+ case []map[string]interface{}:
+ return
+ default:
+ panic("Unknown kind: " + raw.Kind().String())
+ }
+
+ // Check if we have any elements that we need to split. If not, then
+ // just return since we're done. (Nothing currently marks elements for
+ // splitting, so this always returns early.)
+ split := false
+ if !split {
+ return
+ }
+
+ // Make a new result slice that is twice the capacity to fit our growth.
+ result := make([]interface{}, 0, len(s)*2)
+
+ // Go over each element of the original slice and start building up
+ // the resulting slice by splitting where we have to.
+ for _, v := range s {
+ sv, ok := v.(string)
+ if !ok {
+ // Not a string, so just set it
+ result = append(result, v)
+ continue
+ }
+
+ // A plain string; append it unchanged
+ result = append(result, sv)
+ }
+
+ // Our slice is now done, we have to replace the slice now
+ // with this new one that we have.
+ w.replaceCurrent(reflect.ValueOf(result))
+}
diff --git a/vendor/github.com/hashicorp/logutils/LICENSE b/vendor/github.com/hashicorp/logutils/LICENSE
new file mode 100644
index 00000000..c33dcc7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/logutils/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/logutils/README.md b/vendor/github.com/hashicorp/logutils/README.md
new file mode 100644
index 00000000..49490eae
--- /dev/null
+++ b/vendor/github.com/hashicorp/logutils/README.md
@@ -0,0 +1,36 @@
+# logutils
+
+logutils is a Go package that augments the standard library "log" package
+to make logging a bit more modern, without fragmenting the Go ecosystem
+with new logging packages.
+
+## The simplest thing that could possibly work
+
+Presumably your application already uses the default `log` package. To switch, you'll want your code to look like the following:
+
+```go
+package main
+
+import (
+ "log"
+ "os"
+
+ "github.com/hashicorp/logutils"
+)
+
+func main() {
+ filter := &logutils.LevelFilter{
+ Levels: []logutils.LogLevel{"DEBUG", "WARN", "ERROR"},
+ MinLevel: logutils.LogLevel("WARN"),
+ Writer: os.Stderr,
+ }
+ log.SetOutput(filter)
+
+ log.Print("[DEBUG] Debugging") // this will not print
+ log.Print("[WARN] Warning") // this will
+ log.Print("[ERROR] Erring") // and so will this
+ log.Print("Message I haven't updated") // and so will this
+}
+```
+
+This logs to standard error exactly like Go's standard logger. Any log messages you haven't converted to have a level will continue to print as before.
diff --git a/vendor/github.com/hashicorp/logutils/level.go b/vendor/github.com/hashicorp/logutils/level.go
new file mode 100644
index 00000000..6381bf16
--- /dev/null
+++ b/vendor/github.com/hashicorp/logutils/level.go
@@ -0,0 +1,81 @@
+// Package logutils augments the standard log package with levels.
+package logutils
+
+import (
+ "bytes"
+ "io"
+ "sync"
+)
+
+type LogLevel string
+
+// LevelFilter is an io.Writer that can be used with a logger that
+// will filter out log messages that aren't at least a certain level.
+//
+// Once the filter is in use somewhere, it is not safe to modify
+// the structure.
+type LevelFilter struct {
+ // Levels is the list of log levels, in increasing order of
+ // severity. Example might be: {"DEBUG", "WARN", "ERROR"}.
+ Levels []LogLevel
+
+ // MinLevel is the minimum level allowed through
+ MinLevel LogLevel
+
+ // The underlying io.Writer where log messages that pass the filter
+ // will be written.
+ Writer io.Writer
+
+ badLevels map[LogLevel]struct{}
+ once sync.Once
+}
+
+// Check will check a given line if it would be included in the level
+// filter.
+func (f *LevelFilter) Check(line []byte) bool {
+ f.once.Do(f.init)
+
+ // Check for a log level
+ var level LogLevel
+ x := bytes.IndexByte(line, '[')
+ if x >= 0 {
+ y := bytes.IndexByte(line[x:], ']')
+ if y >= 0 {
+ level = LogLevel(line[x+1 : x+y])
+ }
+ }
+
+ _, ok := f.badLevels[level]
+ return !ok
+}
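+
+// For example (illustrative): with Levels {"DEBUG", "WARN", "ERROR"} and
+// MinLevel "WARN", Check([]byte("[DEBUG] x")) reports false, while both
+// Check([]byte("[WARN] x")) and a line with no level tag report true.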
+
+func (f *LevelFilter) Write(p []byte) (n int, err error) {
+ // Note in general that io.Writer can receive any byte sequence
+ // to write, but the "log" package always guarantees that we only
+ // get a single line. We use that as a slight optimization within
+ // this method, assuming we're dealing with a single, complete line
+ // of log data.
+
+ if !f.Check(p) {
+ return len(p), nil
+ }
+
+ return f.Writer.Write(p)
+}
+
+// SetMinLevel is used to update the minimum log level
+func (f *LevelFilter) SetMinLevel(min LogLevel) {
+ f.MinLevel = min
+ f.init()
+}
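+
+// For example (illustrative), raising the minimum level at runtime:
+//
+// filter.SetMinLevel(logutils.LogLevel("ERROR"))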
+
+func (f *LevelFilter) init() {
+ badLevels := make(map[LogLevel]struct{})
+ for _, level := range f.Levels {
+ if level == f.MinLevel {
+ break
+ }
+ badLevels[level] = struct{}{}
+ }
+ f.badLevels = badLevels
+}
diff --git a/vendor/github.com/hashicorp/terraform/LICENSE b/vendor/github.com/hashicorp/terraform/LICENSE
new file mode 100644
index 00000000..c33dcc7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/terraform/README.md b/vendor/github.com/hashicorp/terraform/README.md
new file mode 100644
index 00000000..351cf0e8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/README.md
@@ -0,0 +1,164 @@
+Terraform
+=========
+
+- Website: https://www.terraform.io
+- [![Gitter chat](https://badges.gitter.im/hashicorp-terraform/Lobby.png)](https://gitter.im/hashicorp-terraform/Lobby)
+- Mailing list: [Google Groups](http://groups.google.com/group/terraform-tool)
+
+![Terraform](https://rawgithub.com/hashicorp/terraform/master/website/source/assets/images/logo-hashicorp.svg)
+
+Terraform is a tool for building, changing, and versioning infrastructure safely and efficiently. Terraform can manage existing and popular service providers as well as custom in-house solutions.
+
+The key features of Terraform are:
+
+- **Infrastructure as Code**: Infrastructure is described using a high-level configuration syntax. This allows a blueprint of your datacenter to be versioned and treated as you would any other code. Additionally, infrastructure can be shared and re-used.
+
+- **Execution Plans**: Terraform has a "planning" step where it generates an *execution plan*. The execution plan shows what Terraform will do when you call apply. This lets you avoid any surprises when Terraform manipulates infrastructure.
+
+- **Resource Graph**: Terraform builds a graph of all your resources, and parallelizes the creation and modification of any non-dependent resources. Because of this, Terraform builds infrastructure as efficiently as possible, and operators get insight into dependencies in their infrastructure.
+
+- **Change Automation**: Complex changesets can be applied to your infrastructure with minimal human interaction. With the previously mentioned execution plan and resource graph, you know exactly what Terraform will change and in what order, avoiding many possible human errors.
+
+For more information, see the [introduction section](http://www.terraform.io/intro) of the Terraform website.
+
+Getting Started & Documentation
+-------------------------------
+
+All documentation is available on the [Terraform website](http://www.terraform.io).
+
+Developing Terraform
+--------------------
+
+If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.8+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.
+
+For local dev first make sure Go is properly installed, including setting up a [GOPATH](http://golang.org/doc/code.html#GOPATH). You will also need to add `$GOPATH/bin` to your `$PATH`.
+
+Next, using [Git](https://git-scm.com/), clone this repository into `$GOPATH/src/github.com/hashicorp/terraform`. All the necessary dependencies are either vendored or automatically installed, so you just need to type `make`. This will compile the code and then run the tests. If this exits with exit status 0, then everything is working!
+
+```sh
+$ cd "$GOPATH/src/github.com/hashicorp/terraform"
+$ make
+```
+
+To compile a development version of Terraform and the built-in plugins, run `make dev`. This will build everything using [gox](https://github.com/mitchellh/gox) and put Terraform binaries in the `bin` and `$GOPATH/bin` folders:
+
+```sh
+$ make dev
+...
+$ bin/terraform
+...
+```
+
+If you're developing a specific package, you can run tests for just that package by specifying the `TEST` variable. In the example below, only the `terraform` package tests will be run.
+
+```sh
+$ make test TEST=./terraform
+...
+```
+
+If you're working on a specific provider and only wish to rebuild that provider, you can use the `plugin-dev` target. For example, to build only the Azure provider:
+
+```sh
+$ make plugin-dev PLUGIN=provider-azure
+```
+
+If you're working on the core of Terraform, and only wish to rebuild that without rebuilding providers, you can use the `core-dev` target. It is important to note that some types of changes may require both core and providers to be rebuilt - for example work on the RPC interface. To build just the core of Terraform:
+
+```sh
+$ make core-dev
+```
+
+### Dependencies
+
+Terraform stores its dependencies under `vendor/`, which [Go 1.6+ will automatically recognize and load](https://golang.org/cmd/go/#hdr-Vendor_Directories). We use [`govendor`](https://github.com/kardianos/govendor) to manage the vendored dependencies.
+
+If you're developing Terraform, there are a few tasks you might need to perform.
+
+#### Adding a dependency
+
+If you're adding a dependency, you'll need to vendor it in the same Pull Request as the code that depends on it. You should do this in a separate commit from your code, as it makes PR review easier and the Git history simpler to read in the future.
+
+To add a dependency, assuming your work is on a branch called `my-feature-branch`, the steps look like this:
+
+1. Add the new package to your GOPATH:
+
+ ```bash
+ go get github.com/hashicorp/my-project
+ ```
+
+2. Add the new package to your `vendor/` directory:
+
+ ```bash
+ govendor add github.com/hashicorp/my-project/package
+ ```
+
+3. Review the changes in git and commit them.
+
+#### Updating a dependency
+
+To update a dependency:
+
+1. Fetch the dependency:
+
+ ```bash
+ govendor fetch github.com/hashicorp/my-project
+ ```
+
+2. Review the changes in git and commit them.
+
+### Acceptance Tests
+
+Terraform has a comprehensive [acceptance
+test](http://en.wikipedia.org/wiki/Acceptance_testing) suite covering the
+built-in providers. Our [Contributing Guide](https://github.com/hashicorp/terraform/blob/master/.github/CONTRIBUTING.md) includes details about how and when to write and run acceptance tests in order to help contributions get accepted quickly.
+
+
+### Cross Compilation and Building for Distribution
+
+If you wish to cross-compile Terraform for another architecture, you can set the `XC_OS` and `XC_ARCH` environment variables to values representing the target operating system and architecture before calling `make`. The output is placed in the `pkg` subdirectory tree both expanded in a directory representing the OS/architecture combination and as a ZIP archive.
+
+For example, to compile 64-bit Linux binaries on Mac OS X, you can run:
+
+```sh
+$ XC_OS=linux XC_ARCH=amd64 make bin
+...
+$ file pkg/linux_amd64/terraform
+terraform: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), statically linked, not stripped
+```
+
+`XC_OS` and `XC_ARCH` can be space separated lists representing different combinations of operating system and architecture. For example, to compile for both Linux and Mac OS X, targeting both 32- and 64-bit architectures, you can run:
+
+```sh
+$ XC_OS="linux darwin" XC_ARCH="386 amd64" make bin
+...
+$ tree ./pkg/ -P "terraform|*.zip"
+./pkg/
+├── darwin_386
+│   └── terraform
+├── darwin_386.zip
+├── darwin_amd64
+│   └── terraform
+├── darwin_amd64.zip
+├── linux_386
+│   └── terraform
+├── linux_386.zip
+├── linux_amd64
+│   └── terraform
+└── linux_amd64.zip
+
+4 directories, 8 files
+```
+
+_Note: Cross-compilation uses [gox](https://github.com/mitchellh/gox), which requires toolchains to be built with versions of Go prior to 1.5. In order to successfully cross-compile with older versions of Go, you will need to run `gox -build-toolchain` before running the commands detailed above._
+
+#### Docker
+
+When using Docker you don't need any of the Go development tools installed, and you can clone Terraform to any location on disk (it doesn't have to be inside your `$GOPATH`). This is useful for users who want to build `master` or a specific branch for testing without setting up a full Go environment.
+
+For example, the following command builds Terraform binaries for macOS inside a Linux-based container:
+
+```sh
+docker run --rm -v $(pwd):/go/src/github.com/hashicorp/terraform -w /go/src/github.com/hashicorp/terraform -e XC_OS=darwin -e XC_ARCH=amd64 golang:latest bash -c "apt-get update && apt-get install -y zip && make bin"
+```
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/provider.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/provider.go
new file mode 100644
index 00000000..81462e36
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/provider.go
@@ -0,0 +1,239 @@
+package ignition
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "sync"
+
+ "github.com/coreos/go-systemd/unit"
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// globalCache keeps the instances of the internal Ignition types generated
+// by the different data resources so that they can be reused by the
+// ignition_config data resource. The map keys are a hash of the type
+// serialized to JSON.
+var globalCache = &cache{
+ disks: make(map[string]*types.Disk, 0),
+ arrays: make(map[string]*types.Raid, 0),
+ filesystems: make(map[string]*types.Filesystem, 0),
+ files: make(map[string]*types.File, 0),
+ systemdUnits: make(map[string]*types.SystemdUnit, 0),
+ networkdUnits: make(map[string]*types.NetworkdUnit, 0),
+ users: make(map[string]*types.User, 0),
+ groups: make(map[string]*types.Group, 0),
+}
+
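+// Provider returns the terraform.ResourceProvider for Ignition. Every data
+// source is also registered as a resource through
+// schema.DataSourceResourceShim, so configurations written before these
+// types became data sources keep working.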
+func Provider() terraform.ResourceProvider {
+ return &schema.Provider{
+ DataSourcesMap: map[string]*schema.Resource{
+ "ignition_config": resourceConfig(),
+ "ignition_disk": resourceDisk(),
+ "ignition_raid": resourceRaid(),
+ "ignition_filesystem": resourceFilesystem(),
+ "ignition_file": resourceFile(),
+ "ignition_systemd_unit": resourceSystemdUnit(),
+ "ignition_networkd_unit": resourceNetworkdUnit(),
+ "ignition_user": resourceUser(),
+ "ignition_group": resourceGroup(),
+ },
+ ResourcesMap: map[string]*schema.Resource{
+ "ignition_config": schema.DataSourceResourceShim(
+ "ignition_config",
+ resourceConfig(),
+ ),
+ "ignition_disk": schema.DataSourceResourceShim(
+ "ignition_disk",
+ resourceDisk(),
+ ),
+ "ignition_raid": schema.DataSourceResourceShim(
+ "ignition_raid",
+ resourceRaid(),
+ ),
+ "ignition_filesystem": schema.DataSourceResourceShim(
+ "ignition_filesystem",
+ resourceFilesystem(),
+ ),
+ "ignition_file": schema.DataSourceResourceShim(
+ "ignition_file",
+ resourceFile(),
+ ),
+ "ignition_systemd_unit": schema.DataSourceResourceShim(
+ "ignition_systemd_unit",
+ resourceSystemdUnit(),
+ ),
+ "ignition_networkd_unit": schema.DataSourceResourceShim(
+ "ignition_networkd_unit",
+ resourceNetworkdUnit(),
+ ),
+ "ignition_user": schema.DataSourceResourceShim(
+ "ignition_user",
+ resourceUser(),
+ ),
+ "ignition_group": schema.DataSourceResourceShim(
+ "ignition_group",
+ resourceGroup(),
+ ),
+ },
+ }
+}
+
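+// cache stores the Ignition objects produced by the individual data sources,
+// keyed by the hash returned from id, so that ignition_config can look them
+// up later. The embedded Mutex serializes access, since Terraform may
+// evaluate data sources concurrently.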
+type cache struct {
+ disks map[string]*types.Disk
+ arrays map[string]*types.Raid
+ filesystems map[string]*types.Filesystem
+ files map[string]*types.File
+ systemdUnits map[string]*types.SystemdUnit
+ networkdUnits map[string]*types.NetworkdUnit
+ users map[string]*types.User
+ groups map[string]*types.Group
+
+ sync.Mutex
+}
+
+func (c *cache) addDisk(g *types.Disk) string {
+ c.Lock()
+ defer c.Unlock()
+
+ id := id(g)
+ c.disks[id] = g
+
+ return id
+}
+
+func (c *cache) addRaid(r *types.Raid) string {
+ c.Lock()
+ defer c.Unlock()
+
+ id := id(r)
+ c.arrays[id] = r
+
+ return id
+}
+
+func (c *cache) addFilesystem(f *types.Filesystem) string {
+ c.Lock()
+ defer c.Unlock()
+
+ id := id(f)
+ c.filesystems[id] = f
+
+ return id
+}
+
+func (c *cache) addFile(f *types.File) string {
+ c.Lock()
+ defer c.Unlock()
+
+ id := id(f)
+ c.files[id] = f
+
+ return id
+}
+
+func (c *cache) addSystemdUnit(u *types.SystemdUnit) string {
+ c.Lock()
+ defer c.Unlock()
+
+ id := id(u)
+ c.systemdUnits[id] = u
+
+ return id
+}
+
+func (c *cache) addNetworkdUnit(u *types.NetworkdUnit) string {
+ c.Lock()
+ defer c.Unlock()
+
+ id := id(u)
+ c.networkdUnits[id] = u
+
+ return id
+}
+
+func (c *cache) addUser(u *types.User) string {
+ c.Lock()
+ defer c.Unlock()
+
+ id := id(u)
+ c.users[id] = u
+
+ return id
+}
+
+func (c *cache) addGroup(g *types.Group) string {
+ c.Lock()
+ defer c.Unlock()
+
+ id := id(g)
+ c.groups[id] = g
+
+ return id
+}
+
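+// id derives a deterministic identifier for input: the hex-encoded SHA-256
+// of its JSON encoding. The json.Marshal error is ignored because the
+// Ignition types are plain data structures that marshal cleanly.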
+func id(input interface{}) string {
+ b, _ := json.Marshal(input)
+ return hash(string(b))
+}
+
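+// hash returns the hex-encoded SHA-256 digest of s.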
+func hash(s string) string {
+ sha := sha256.Sum256([]byte(s))
+ return hex.EncodeToString(sha[:])
+}
+
+func castSliceInterface(i []interface{}) []string {
+ var o []string
+ for _, value := range i {
+ o = append(o, value.(string))
+ }
+
+ return o
+}
+
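+// getUInt reads an optional integer attribute and returns it as a *uint,
+// or nil when the attribute is unset.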
+func getUInt(d *schema.ResourceData, key string) *uint {
+ var uid *uint
+ if value, ok := d.GetOk(key); ok {
+ u := uint(value.(int))
+ uid = &u
+ }
+
+ return uid
+}
+
+var errEmptyUnit = fmt.Errorf("invalid or empty unit content")
+
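+// validateUnitContent deserializes content as a systemd unit file and
+// returns errEmptyUnit when it parses to zero sections, or a wrapped error
+// when it cannot be parsed at all.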
+func validateUnitContent(content string) error {
+ c := bytes.NewBufferString(content)
+ unit, err := unit.Deserialize(c)
+ if err != nil {
+ return fmt.Errorf("invalid unit content: %s", err)
+ }
+
+ if len(unit) == 0 {
+ return errEmptyUnit
+ }
+
+ return nil
+}
+
+func buildURL(raw string) (types.Url, error) {
+ u, err := url.Parse(raw)
+ if err != nil {
+ return types.Url{}, err
+ }
+
+ return types.Url(*u), nil
+}
+
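+// buildHash parses a verification string into a types.Hash by feeding it
+// through the type's own JSON unmarshalling (the expected form is
+// "<function>-<hex digest>", e.g. a sha512 sum).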
+func buildHash(raw string) (types.Hash, error) {
+ h := types.Hash{}
+ err := h.UnmarshalJSON([]byte(fmt.Sprintf("%q", raw)))
+
+ return h, err
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_config.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_config.go
new file mode 100644
index 00000000..c75e50af
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_config.go
@@ -0,0 +1,308 @@
+package ignition
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/hashicorp/terraform/helper/schema"
+
+ "github.com/coreos/ignition/config/types"
+)
+
+var configReferenceResource = &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "source": &schema.Schema{
+ Type: schema.TypeString,
+ ForceNew: true,
+ Required: true,
+ },
+ "verification": &schema.Schema{
+ Type: schema.TypeString,
+ ForceNew: true,
+ Optional: true,
+ },
+ },
+}
+
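+// resourceConfig defines the ignition_config data source, which renders the
+// ids produced by the other ignition_* data sources into a complete Ignition
+// config. A hypothetical minimal configuration might look like:
+//
+//	data "ignition_config" "example" {
+//	  files = ["${data.ignition_file.motd.id}"]
+//	}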
+func resourceConfig() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceIgnitionFileExists,
+ Read: resourceIgnitionFileRead,
+ Schema: map[string]*schema.Schema{
+ "disks": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "arrays": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "filesystems": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "files": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "systemd": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "networkd": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "users": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "groups": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "replace": &schema.Schema{
+ Type: schema.TypeList,
+ ForceNew: true,
+ Optional: true,
+ MaxItems: 1,
+ Elem: configReferenceResource,
+ },
+ "append": &schema.Schema{
+ Type: schema.TypeList,
+ ForceNew: true,
+ Optional: true,
+ Elem: configReferenceResource,
+ },
+ "rendered": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func resourceIgnitionFileRead(d *schema.ResourceData, meta interface{}) error {
+ rendered, err := renderConfig(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ if err := d.Set("rendered", rendered); err != nil {
+ return err
+ }
+
+ d.SetId(hash(rendered))
+ return nil
+}
+
+func resourceIgnitionFileExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ rendered, err := renderConfig(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return hash(rendered) == d.Id(), nil
+}
+
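+// renderConfig builds the types.Config from the resource data and returns
+// it as indented JSON; the hash of this string becomes the resource id.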
+func renderConfig(d *schema.ResourceData, c *cache) (string, error) {
+ i, err := buildConfig(d, c)
+ if err != nil {
+ return "", err
+ }
+
+ bytes, err := json.MarshalIndent(i, " ", " ")
+
+ if err != nil {
+ return "", err
+ }
+
+ return string(bytes), nil
+}
+
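+// buildConfig resolves every referenced id against the cache and assembles
+// the full types.Config: ignition, storage, systemd, networkd and passwd.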
+func buildConfig(d *schema.ResourceData, c *cache) (*types.Config, error) {
+ var err error
+ config := &types.Config{}
+ config.Ignition, err = buildIgnition(d)
+ if err != nil {
+ return nil, err
+ }
+
+ config.Storage, err = buildStorage(d, c)
+ if err != nil {
+ return nil, err
+ }
+
+ config.Systemd, err = buildSystemd(d, c)
+ if err != nil {
+ return nil, err
+ }
+
+ config.Networkd, err = buildNetworkd(d, c)
+ if err != nil {
+ return nil, err
+ }
+
+ config.Passwd, err = buildPasswd(d, c)
+ if err != nil {
+ return nil, err
+ }
+
+ return config, nil
+}
+
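+// buildIgnition fills in the ignition section: the fixed config version
+// plus the optional replace reference and any append references.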
+func buildIgnition(d *schema.ResourceData) (types.Ignition, error) {
+ var err error
+
+ i := types.Ignition{}
+ i.Version.UnmarshalJSON([]byte(`"2.0.0"`))
+
+ rr := d.Get("replace.0").(map[string]interface{})
+ if len(rr) != 0 {
+ i.Config.Replace, err = buildConfigReference(rr)
+ if err != nil {
+ return i, err
+ }
+ }
+
+ ar := d.Get("append").([]interface{})
+ if len(ar) != 0 {
+ for _, rr := range ar {
+ r, err := buildConfigReference(rr.(map[string]interface{}))
+ if err != nil {
+ return i, err
+ }
+
+ i.Config.Append = append(i.Config.Append, *r)
+ }
+ }
+
+ return i, nil
+}
+
+func buildConfigReference(raw map[string]interface{}) (*types.ConfigReference, error) {
+ r := &types.ConfigReference{}
+
+ src, err := buildURL(raw["source"].(string))
+ if err != nil {
+ return nil, err
+ }
+
+ r.Source = src
+
+ hash, err := buildHash(raw["verification"].(string))
+ if err != nil {
+ return nil, err
+ }
+
+ r.Verification.Hash = &hash
+
+ return r, nil
+}
+
+func buildStorage(d *schema.ResourceData, c *cache) (types.Storage, error) {
+ storage := types.Storage{}
+
+ for _, id := range d.Get("disks").([]interface{}) {
+ d, ok := c.disks[id.(string)]
+ if !ok {
+ return storage, fmt.Errorf("invalid disk %q, unknown disk id", id)
+ }
+
+ storage.Disks = append(storage.Disks, *d)
+ }
+
+ for _, id := range d.Get("arrays").([]interface{}) {
+ a, ok := c.arrays[id.(string)]
+ if !ok {
+ return storage, fmt.Errorf("invalid raid %q, unknown raid id", id)
+ }
+
+ storage.Arrays = append(storage.Arrays, *a)
+ }
+
+ for _, id := range d.Get("filesystems").([]interface{}) {
+ f, ok := c.filesystems[id.(string)]
+ if !ok {
+ return storage, fmt.Errorf("invalid filesystem %q, unknown filesystem id", id)
+ }
+
+ storage.Filesystems = append(storage.Filesystems, *f)
+ }
+
+ for _, id := range d.Get("files").([]interface{}) {
+ f, ok := c.files[id.(string)]
+ if !ok {
+ return storage, fmt.Errorf("invalid file %q, unknown file id", id)
+ }
+
+ storage.Files = append(storage.Files, *f)
+ }
+
+ return storage, nil
+}
+
+func buildSystemd(d *schema.ResourceData, c *cache) (types.Systemd, error) {
+ systemd := types.Systemd{}
+
+ for _, id := range d.Get("systemd").([]interface{}) {
+ u, ok := c.systemdUnits[id.(string)]
+ if !ok {
+ return systemd, fmt.Errorf("invalid systemd unit %q, unknown systemd unit id", id)
+ }
+
+ systemd.Units = append(systemd.Units, *u)
+ }
+
+ return systemd, nil
+}
+
+func buildNetworkd(d *schema.ResourceData, c *cache) (types.Networkd, error) {
+ networkd := types.Networkd{}
+
+ for _, id := range d.Get("networkd").([]interface{}) {
+ u, ok := c.networkdUnits[id.(string)]
+ if !ok {
+ return networkd, fmt.Errorf("invalid networkd unit %q, unknown networkd unit id", id)
+ }
+
+ networkd.Units = append(networkd.Units, *u)
+ }
+
+ return networkd, nil
+}
+
+func buildPasswd(d *schema.ResourceData, c *cache) (types.Passwd, error) {
+ passwd := types.Passwd{}
+
+ for _, id := range d.Get("users").([]interface{}) {
+ u, ok := c.users[id.(string)]
+ if !ok {
+ return passwd, fmt.Errorf("invalid user %q, unknown user id", id)
+ }
+
+ passwd.Users = append(passwd.Users, *u)
+ }
+
+ for _, id := range d.Get("groups").([]interface{}) {
+ g, ok := c.groups[id.(string)]
+ if !ok {
+ return passwd, fmt.Errorf("invalid group %q, unknown group id", id)
+ }
+
+ passwd.Groups = append(passwd.Groups, *g)
+ }
+
+ return passwd, nil
+
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_disk.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_disk.go
new file mode 100644
index 00000000..8ef6c7e0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_disk.go
@@ -0,0 +1,99 @@
+package ignition
+
+import (
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceDisk() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceDiskExists,
+ Read: resourceDiskRead,
+ Schema: map[string]*schema.Schema{
+ "device": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "wipe_table": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+ "partition": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "label": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "number": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "size": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "start": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "type_guid": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func resourceDiskRead(d *schema.ResourceData, meta interface{}) error {
+ id, err := buildDisk(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(id)
+ return nil
+}
+
+func resourceDiskExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ id, err := buildDisk(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return id == d.Id(), nil
+}
+
+func buildDisk(d *schema.ResourceData, c *cache) (string, error) {
+ var partitions []types.Partition
+ for _, raw := range d.Get("partition").([]interface{}) {
+ v := raw.(map[string]interface{})
+
+ partitions = append(partitions, types.Partition{
+ Label: types.PartitionLabel(v["label"].(string)),
+ Number: v["number"].(int),
+ Size: types.PartitionDimension(v["size"].(int)),
+ Start: types.PartitionDimension(v["start"].(int)),
+ TypeGUID: types.PartitionTypeGUID(v["type_guid"].(string)),
+ })
+ }
+
+ return c.addDisk(&types.Disk{
+ Device: types.Path(d.Get("device").(string)),
+ WipeTable: d.Get("wipe_table").(bool),
+ Partitions: partitions,
+ }), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_file.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_file.go
new file mode 100644
index 00000000..0f73ea6e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_file.go
@@ -0,0 +1,178 @@
+package ignition
+
+import (
+ "encoding/base64"
+ "fmt"
+
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceFile() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceFileExists,
+ Read: resourceFileRead,
+ Schema: map[string]*schema.Schema{
+ "filesystem": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "path": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "content": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "mime": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Default: "text/plain",
+ },
+
+ "content": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ },
+ },
+ },
+ "source": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "source": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "compression": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "verification": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ },
+ },
+ "mode": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "uid": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "gid": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceFileRead(d *schema.ResourceData, meta interface{}) error {
+ id, err := buildFile(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(id)
+ return nil
+}
+
+func resourceFileExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ id, err := buildFile(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return id == d.Id(), nil
+}
+
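+// buildFile requires exactly one of "content" and "source": inline content
+// is encoded into a data: URL, while a source block supplies a remote URL
+// with optional compression and verification. The resulting types.File is
+// cached and its id returned.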
+func buildFile(d *schema.ResourceData, c *cache) (string, error) {
+ _, hasContent := d.GetOk("content")
+ _, hasSource := d.GetOk("source")
+ if hasContent && hasSource {
+ return "", fmt.Errorf("content and source options are incompatible")
+ }
+
+ if !hasContent && !hasSource {
+ return "", fmt.Errorf("content or source options must be present")
+ }
+
+ var compression types.Compression
+ var source types.Url
+ var hash *types.Hash
+ var err error
+
+ if hasContent {
+ source, err = encodeDataURL(
+ d.Get("content.0.mime").(string),
+ d.Get("content.0.content").(string),
+ )
+
+ if err != nil {
+ return "", err
+ }
+ }
+
+ if hasSource {
+ source, err = buildURL(d.Get("source.0.source").(string))
+ if err != nil {
+ return "", err
+ }
+
+ compression = types.Compression(d.Get("source.0.compression").(string))
+ h, err := buildHash(d.Get("source.0.verification").(string))
+ if err != nil {
+ return "", err
+ }
+
+ hash = &h
+ }
+
+ return c.addFile(&types.File{
+ Filesystem: d.Get("filesystem").(string),
+ Path: types.Path(d.Get("path").(string)),
+ Contents: types.FileContents{
+ Compression: compression,
+ Source: source,
+ Verification: types.Verification{
+ Hash: hash,
+ },
+ },
+ User: types.FileUser{
+ Id: d.Get("uid").(int),
+ },
+ Group: types.FileGroup{
+ Id: d.Get("gid").(int),
+ },
+ Mode: types.FileMode(d.Get("mode").(int)),
+ }), nil
+}
+
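+// encodeDataURL wraps content in an RFC 2397 data: URL, base64-encoded and
+// tagged with the given MIME type, so inline content needs no external host.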
+func encodeDataURL(mime, content string) (types.Url, error) {
+ base64 := base64.StdEncoding.EncodeToString([]byte(content))
+ return buildURL(
+ fmt.Sprintf("data:%s;charset=utf-8;base64,%s", mime, base64),
+ )
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_filesystem.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_filesystem.go
new file mode 100644
index 00000000..ce858e80
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_filesystem.go
@@ -0,0 +1,122 @@
+package ignition
+
+import (
+ "fmt"
+
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceFilesystem() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceFilesystemExists,
+ Read: resourceFilesystemRead,
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "mount": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "device": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "format": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "create": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+ "force": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+ "options": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ },
+ },
+ },
+ "path": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceFilesystemRead(d *schema.ResourceData, meta interface{}) error {
+ id, err := buildFilesystem(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(id)
+ return nil
+}
+
+func resourceFilesystemExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ id, err := buildFilesystem(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return id == d.Id(), nil
+}
+
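+// buildFilesystem caches a types.Filesystem assembled from either a "mount"
+// block or a "path" attribute; the two are mutually exclusive.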
+func buildFilesystem(d *schema.ResourceData, c *cache) (string, error) {
+ var mount *types.FilesystemMount
+ if _, ok := d.GetOk("mount"); ok {
+ mount = &types.FilesystemMount{
+ Device: types.Path(d.Get("mount.0.device").(string)),
+ Format: types.FilesystemFormat(d.Get("mount.0.format").(string)),
+ }
+
+ create, hasCreate := d.GetOk("mount.0.create")
+ force, hasForce := d.GetOk("mount.0.force")
+ options, hasOptions := d.GetOk("mount.0.options")
+ if hasCreate || hasOptions || hasForce {
+ mount.Create = &types.FilesystemCreate{
+ Force: force.(bool),
+ Options: castSliceInterface(options.([]interface{})),
+ }
+ }
+
+ if !create.(bool) && (hasForce || hasOptions) {
+ return "", fmt.Errorf("create should be true when force or options is used")
+ }
+ }
+
+ var path *types.Path
+ if p, ok := d.GetOk("path"); ok {
+ tp := types.Path(p.(string))
+ path = &tp
+ }
+
+ if mount != nil && path != nil {
+ return "", fmt.Errorf("mount and path are mutually exclusive")
+ }
+
+ return c.addFilesystem(&types.Filesystem{
+ Name: d.Get("name").(string),
+ Mount: mount,
+ Path: path,
+ }), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_group.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_group.go
new file mode 100644
index 00000000..125e97e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_group.go
@@ -0,0 +1,57 @@
+package ignition
+
+import (
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceGroup() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceGroupExists,
+ Read: resourceGroupRead,
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "gid": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "password_hash": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceGroupRead(d *schema.ResourceData, meta interface{}) error {
+ id, err := buildGroup(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(id)
+ return nil
+}
+
+func resourceGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ id, err := buildGroup(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return id == d.Id(), nil
+}
+
+func buildGroup(d *schema.ResourceData, c *cache) (string, error) {
+ return c.addGroup(&types.Group{
+ Name: d.Get("name").(string),
+ PasswordHash: d.Get("password_hash").(string),
+ Gid: getUInt(d, "gid"),
+ }), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_networkd_unit.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_networkd_unit.go
new file mode 100644
index 00000000..9fd40ed5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_networkd_unit.go
@@ -0,0 +1,60 @@
+package ignition
+
+import (
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceNetworkdUnit() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceNetworkdUnitExists,
+ Read: resourceNetworkdUnitRead,
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "content": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceNetworkdUnitRead(d *schema.ResourceData, meta interface{}) error {
+ id, err := buildNetworkdUnit(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(id)
+ return nil
+}
+
+func resourceNetworkdUnitDelete(d *schema.ResourceData, meta interface{}) error {
+ d.SetId("")
+ return nil
+}
+
+func resourceNetworkdUnitExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ id, err := buildNetworkdUnit(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return id == d.Id(), nil
+}
+
+func buildNetworkdUnit(d *schema.ResourceData, c *cache) (string, error) {
+ if err := validateUnitContent(d.Get("content").(string)); err != nil {
+ return "", err
+ }
+
+ return c.addNetworkdUnit(&types.NetworkdUnit{
+ Name: types.NetworkdUnitName(d.Get("name").(string)),
+ Contents: d.Get("content").(string),
+ }), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_raid.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_raid.go
new file mode 100644
index 00000000..dab1a5f7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_raid.go
@@ -0,0 +1,69 @@
+package ignition
+
+import (
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceRaid() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceRaidExists,
+ Read: resourceRaidRead,
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "level": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "devices": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "spares": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceRaidRead(d *schema.ResourceData, meta interface{}) error {
+ id, err := buildRaid(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(id)
+ return nil
+}
+
+func resourceRaidExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ id, err := buildRaid(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return id == d.Id(), nil
+}
+
+func buildRaid(d *schema.ResourceData, c *cache) (string, error) {
+ var devices []types.Path
+ for _, value := range d.Get("devices").([]interface{}) {
+ devices = append(devices, types.Path(value.(string)))
+ }
+
+ return c.addRaid(&types.Raid{
+ Name: d.Get("name").(string),
+ Level: d.Get("level").(string),
+ Devices: devices,
+ Spares: d.Get("spares").(int),
+ }), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_systemd_unit.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_systemd_unit.go
new file mode 100644
index 00000000..88fe9b20
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_systemd_unit.go
@@ -0,0 +1,104 @@
+package ignition
+
+import (
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceSystemdUnit() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceSystemdUnitExists,
+ Read: resourceSystemdUnitRead,
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "enable": {
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: true,
+ ForceNew: true,
+ },
+ "mask": {
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+ "content": {
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "dropin": {
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "content": {
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func resourceSystemdUnitRead(d *schema.ResourceData, meta interface{}) error {
+ id, err := buildSystemdUnit(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(id)
+ return nil
+}
+
+func resourceSystemdUnitExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ id, err := buildSystemdUnit(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return id == d.Id(), nil
+}
+
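+// buildSystemdUnit validates the unit body and each drop-in before caching
+// the types.SystemdUnit. An empty body is tolerated (errEmptyUnit is
+// swallowed) since a unit may consist only of drop-ins or a bare
+// enable/mask flag.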
+func buildSystemdUnit(d *schema.ResourceData, c *cache) (string, error) {
+ var dropins []types.SystemdUnitDropIn
+ for _, raw := range d.Get("dropin").([]interface{}) {
+ value := raw.(map[string]interface{})
+
+ if err := validateUnitContent(value["content"].(string)); err != nil {
+ return "", err
+ }
+
+ dropins = append(dropins, types.SystemdUnitDropIn{
+ Name: types.SystemdUnitDropInName(value["name"].(string)),
+ Contents: value["content"].(string),
+ })
+ }
+
+ if err := validateUnitContent(d.Get("content").(string)); err != nil {
+ if err != errEmptyUnit {
+ return "", err
+ }
+ }
+
+ return c.addSystemdUnit(&types.SystemdUnit{
+ Name: types.SystemdUnitName(d.Get("name").(string)),
+ Contents: d.Get("content").(string),
+ Enable: d.Get("enable").(bool),
+ Mask: d.Get("mask").(bool),
+ DropIns: dropins,
+ }), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_user.go b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_user.go
new file mode 100644
index 00000000..183e6c8c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/providers/ignition/resource_ignition_user.go
@@ -0,0 +1,126 @@
+package ignition
+
+import (
+ "reflect"
+
+ "github.com/coreos/ignition/config/types"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceUser() *schema.Resource {
+ return &schema.Resource{
+ Exists: resourceUserExists,
+ Read: resourceUserRead,
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "password_hash": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "ssh_authorized_keys": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "uid": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "gecos": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "home_dir": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "no_create_home": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+ "primary_group": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "groups": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ "no_user_group": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+ "no_log_init": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+ "shell": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceUserRead(d *schema.ResourceData, meta interface{}) error {
+ id, err := buildUser(d, globalCache)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(id)
+ return nil
+}
+
+func resourceUserExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ id, err := buildUser(d, globalCache)
+ if err != nil {
+ return false, err
+ }
+
+ return id == d.Id(), nil
+}
+
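+// buildUser assembles a types.User from the resource data. The UserCreate
+// section is attached only when at least one creation option is set, which
+// is detected by comparing the struct to its zero value.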
+func buildUser(d *schema.ResourceData, c *cache) (string, error) {
+ uc := types.UserCreate{
+ Uid: getUInt(d, "uid"),
+ GECOS: d.Get("gecos").(string),
+ Homedir: d.Get("home_dir").(string),
+ NoCreateHome: d.Get("no_create_home").(bool),
+ PrimaryGroup: d.Get("primary_group").(string),
+ Groups: castSliceInterface(d.Get("groups").([]interface{})),
+ NoUserGroup: d.Get("no_user_group").(bool),
+ NoLogInit: d.Get("no_log_init").(bool),
+ Shell: d.Get("shell").(string),
+ }
+
+ puc := &uc
+ if reflect.DeepEqual(uc, types.UserCreate{}) { // check if the struct is empty
+ puc = nil
+ }
+
+ user := types.User{
+ Name: d.Get("name").(string),
+ PasswordHash: d.Get("password_hash").(string),
+ SSHAuthorizedKeys: castSliceInterface(d.Get("ssh_authorized_keys").([]interface{})),
+ Create: puc,
+ }
+
+ return c.addUser(&user), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/append.go b/vendor/github.com/hashicorp/terraform/config/append.go
new file mode 100644
index 00000000..5f4e89ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/append.go
@@ -0,0 +1,86 @@
+package config
+
+// Append appends one configuration to another.
+//
+// Append assumes that both configurations will not have
+// conflicting variables, resources, etc. If they do, the
+// problems will be caught in the validation phase.
+//
+// It is possible that c1, c2 on their own are not valid. For
+// example, a resource in c2 may reference a variable in c1. But
+// together, they would be valid.
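+//
+// Atlas settings are taken from c2 when present, Terraform blocks are
+// merged, and list-valued sections (modules, outputs, provider configs,
+// resources, variables) are concatenated in order.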
+func Append(c1, c2 *Config) (*Config, error) {
+ c := new(Config)
+
+ // Append unknown keys, but keep them unique since it is a set
+ unknowns := make(map[string]struct{})
+ for _, k := range c1.unknownKeys {
+ _, present := unknowns[k]
+ if !present {
+ unknowns[k] = struct{}{}
+ c.unknownKeys = append(c.unknownKeys, k)
+ }
+ }
+
+ for _, k := range c2.unknownKeys {
+ _, present := unknowns[k]
+ if !present {
+ unknowns[k] = struct{}{}
+ c.unknownKeys = append(c.unknownKeys, k)
+ }
+ }
+
+ c.Atlas = c1.Atlas
+ if c2.Atlas != nil {
+ c.Atlas = c2.Atlas
+ }
+
+ // merge Terraform blocks
+ if c1.Terraform != nil {
+ c.Terraform = c1.Terraform
+ if c2.Terraform != nil {
+ c.Terraform.Merge(c2.Terraform)
+ }
+ } else {
+ c.Terraform = c2.Terraform
+ }
+
+ if len(c1.Modules) > 0 || len(c2.Modules) > 0 {
+ c.Modules = make(
+ []*Module, 0, len(c1.Modules)+len(c2.Modules))
+ c.Modules = append(c.Modules, c1.Modules...)
+ c.Modules = append(c.Modules, c2.Modules...)
+ }
+
+ if len(c1.Outputs) > 0 || len(c2.Outputs) > 0 {
+ c.Outputs = make(
+ []*Output, 0, len(c1.Outputs)+len(c2.Outputs))
+ c.Outputs = append(c.Outputs, c1.Outputs...)
+ c.Outputs = append(c.Outputs, c2.Outputs...)
+ }
+
+ if len(c1.ProviderConfigs) > 0 || len(c2.ProviderConfigs) > 0 {
+ c.ProviderConfigs = make(
+ []*ProviderConfig,
+ 0, len(c1.ProviderConfigs)+len(c2.ProviderConfigs))
+ c.ProviderConfigs = append(c.ProviderConfigs, c1.ProviderConfigs...)
+ c.ProviderConfigs = append(c.ProviderConfigs, c2.ProviderConfigs...)
+ }
+
+ if len(c1.Resources) > 0 || len(c2.Resources) > 0 {
+ c.Resources = make(
+ []*Resource,
+ 0, len(c1.Resources)+len(c2.Resources))
+ c.Resources = append(c.Resources, c1.Resources...)
+ c.Resources = append(c.Resources, c2.Resources...)
+ }
+
+ if len(c1.Variables) > 0 || len(c2.Variables) > 0 {
+ c.Variables = make(
+ []*Variable, 0, len(c1.Variables)+len(c2.Variables))
+ c.Variables = append(c.Variables, c1.Variables...)
+ c.Variables = append(c.Variables, c2.Variables...)
+ }
+
+ return c, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go
new file mode 100644
index 00000000..9a764ace
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config.go
@@ -0,0 +1,1096 @@
+// Package config is responsible for loading and validating the
+// configuration.
+package config
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/hil"
+ "github.com/hashicorp/hil/ast"
+ "github.com/hashicorp/terraform/helper/hilmapstructure"
+ "github.com/mitchellh/reflectwalk"
+)
+
+// NameRegexp is the regular expression that all names (modules, providers,
+// resources, etc.) must follow.
+var NameRegexp = regexp.MustCompile(`(?i)\A[A-Z0-9_][A-Z0-9\-\_]*\z`)
+
+// Config is the configuration that comes from loading a collection
+// of Terraform templates.
+type Config struct {
+ // Dir is the path to the directory where this configuration was
+ // loaded from. If it is blank, this configuration wasn't loaded from
+ // any meaningful directory.
+ Dir string
+
+ Terraform *Terraform
+ Atlas *AtlasConfig
+ Modules []*Module
+ ProviderConfigs []*ProviderConfig
+ Resources []*Resource
+ Variables []*Variable
+ Outputs []*Output
+
+}
+ unknownKeys []string
+}
+
+// AtlasConfig is the configuration for building in HashiCorp's Atlas.
+type AtlasConfig struct {
+ Name string
+ Include []string
+ Exclude []string
+}
+
+// Module is a module used within a configuration.
+//
+// This does not represent a module itself, this represents a module
+// call-site within an existing configuration.
+type Module struct {
+ Name string
+ Source string
+ RawConfig *RawConfig
+}
+
+// ProviderConfig is the configuration for a resource provider.
+//
+// For example, Terraform needs to set the AWS access keys for the AWS
+// resource provider.
+type ProviderConfig struct {
+ Name string
+ Alias string
+ RawConfig *RawConfig
+}
+
+// Resource represents a single Terraform resource in the configuration.
+// A Terraform resource is something that supports some or all of the
+// usual "create, read, update, delete" operations, depending on
+// the given Mode.
+type Resource struct {
+ Mode ResourceMode // which operations the resource supports
+ Name string
+ Type string
+ RawCount *RawConfig
+ RawConfig *RawConfig
+ Provisioners []*Provisioner
+ Provider string
+ DependsOn []string
+ Lifecycle ResourceLifecycle
+}
+
+// Copy returns a copy of this Resource. Helpful for avoiding shared
+// config pointers across multiple pieces of the graph that need to do
+// interpolation.
+func (r *Resource) Copy() *Resource {
+ n := &Resource{
+ Mode: r.Mode,
+ Name: r.Name,
+ Type: r.Type,
+ RawCount: r.RawCount.Copy(),
+ RawConfig: r.RawConfig.Copy(),
+ Provisioners: make([]*Provisioner, 0, len(r.Provisioners)),
+ Provider: r.Provider,
+ DependsOn: make([]string, len(r.DependsOn)),
+ Lifecycle: *r.Lifecycle.Copy(),
+ }
+ for _, p := range r.Provisioners {
+ n.Provisioners = append(n.Provisioners, p.Copy())
+ }
+ copy(n.DependsOn, r.DependsOn)
+ return n
+}
+
+// ResourceLifecycle is used to store the lifecycle tuning parameters
+// to allow customized behavior
+type ResourceLifecycle struct {
+ CreateBeforeDestroy bool `mapstructure:"create_before_destroy"`
+ PreventDestroy bool `mapstructure:"prevent_destroy"`
+ IgnoreChanges []string `mapstructure:"ignore_changes"`
+}
+
+// Copy returns a copy of this ResourceLifecycle
+func (r *ResourceLifecycle) Copy() *ResourceLifecycle {
+ n := &ResourceLifecycle{
+ CreateBeforeDestroy: r.CreateBeforeDestroy,
+ PreventDestroy: r.PreventDestroy,
+ IgnoreChanges: make([]string, len(r.IgnoreChanges)),
+ }
+ copy(n.IgnoreChanges, r.IgnoreChanges)
+ return n
+}
+
+// Provisioner is a configured provisioner step on a resource.
+type Provisioner struct {
+ Type string
+ RawConfig *RawConfig
+ ConnInfo *RawConfig
+
+ When ProvisionerWhen
+ OnFailure ProvisionerOnFailure
+}
+
+// Copy returns a copy of this Provisioner
+func (p *Provisioner) Copy() *Provisioner {
+ return &Provisioner{
+ Type: p.Type,
+ RawConfig: p.RawConfig.Copy(),
+ ConnInfo: p.ConnInfo.Copy(),
+ When: p.When,
+ OnFailure: p.OnFailure,
+ }
+}
+
+// Variable is a variable defined within the configuration.
+type Variable struct {
+ Name string
+ DeclaredType string `mapstructure:"type"`
+ Default interface{}
+ Description string
+}
+
+// Output is an output defined within the configuration. An output is
+// resulting data that is highlighted by Terraform when finished. An
+// output marked Sensitive will be output in a masked form following
+// application, but will still be available in state.
+type Output struct {
+ Name string
+ DependsOn []string
+ Description string
+ Sensitive bool
+ RawConfig *RawConfig
+}
+
+// VariableType is the type of value a variable is holding, and returned
+// by the Type() function on variables.
+type VariableType byte
+
+const (
+ VariableTypeUnknown VariableType = iota
+ VariableTypeString
+ VariableTypeList
+ VariableTypeMap
+)
+
+func (v VariableType) Printable() string {
+ switch v {
+ case VariableTypeString:
+ return "string"
+ case VariableTypeMap:
+ return "map"
+ case VariableTypeList:
+ return "list"
+ default:
+ return "unknown"
+ }
+}
+
+// ProviderConfigName returns the name of the provider configuration in
+// the given mapping that maps to the proper provider configuration
+// for this resource.
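+//
+// The longest configured name that prefixes the resource type wins; for
+// example, given provider configs "aws" and "a", resource type
+// "aws_instance" resolves to "aws".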
+func ProviderConfigName(t string, pcs []*ProviderConfig) string {
+ lk := ""
+ for _, v := range pcs {
+ k := v.Name
+ if strings.HasPrefix(t, k) && len(k) > len(lk) {
+ lk = k
+ }
+ }
+
+ return lk
+}
+
+// Id returns a unique identifier for this module.
+func (r *Module) Id() string {
+	return r.Name
+}
+
+// Count returns the count of this resource.
+func (r *Resource) Count() (int, error) {
+ raw := r.RawCount.Value()
+ count, ok := r.RawCount.Value().(string)
+ if !ok {
+ return 0, fmt.Errorf(
+ "expected count to be a string or int, got %T", raw)
+ }
+
+ v, err := strconv.ParseInt(count, 0, 0)
+ if err != nil {
+ return 0, err
+ }
+
+ return int(v), nil
+}
+
+// Id returns a unique identifier for this resource.
+func (r *Resource) Id() string {
+ switch r.Mode {
+ case ManagedResourceMode:
+ return fmt.Sprintf("%s.%s", r.Type, r.Name)
+ case DataResourceMode:
+ return fmt.Sprintf("data.%s.%s", r.Type, r.Name)
+ default:
+ panic(fmt.Errorf("unknown resource mode %s", r.Mode))
+ }
+}
+
+// Validate does some basic semantic checking of the configuration.
+func (c *Config) Validate() error {
+ if c == nil {
+ return nil
+ }
+
+ var errs []error
+
+ for _, k := range c.unknownKeys {
+ errs = append(errs, fmt.Errorf(
+ "Unknown root level key: %s", k))
+ }
+
+ // Validate the Terraform config
+ if tf := c.Terraform; tf != nil {
+ errs = append(errs, c.Terraform.Validate()...)
+ }
+
+ vars := c.InterpolatedVariables()
+ varMap := make(map[string]*Variable)
+ for _, v := range c.Variables {
+ if _, ok := varMap[v.Name]; ok {
+ errs = append(errs, fmt.Errorf(
+ "Variable '%s': duplicate found. Variable names must be unique.",
+ v.Name))
+ }
+
+ varMap[v.Name] = v
+ }
+
+	for k := range varMap {
+ if !NameRegexp.MatchString(k) {
+ errs = append(errs, fmt.Errorf(
+				"variable %q: variable name must match regular expression %s",
+ k, NameRegexp))
+ }
+ }
+
+ for _, v := range c.Variables {
+ if v.Type() == VariableTypeUnknown {
+ errs = append(errs, fmt.Errorf(
+ "Variable '%s': must be a string or a map",
+ v.Name))
+ continue
+ }
+
+ interp := false
+ fn := func(n ast.Node) (interface{}, error) {
+ // LiteralNode is a literal string (outside of a ${ ... } sequence).
+			// interpolationWalker skips most of these, but in particular it
+ // visits those that have escaped sequences (like $${foo}) as a
+ // signal that *some* processing is required on this string. For
+ // our purposes here though, this is fine and not an interpolation.
+ if _, ok := n.(*ast.LiteralNode); !ok {
+ interp = true
+ }
+ return "", nil
+ }
+
+ w := &interpolationWalker{F: fn}
+ if v.Default != nil {
+ if err := reflectwalk.Walk(v.Default, w); err == nil {
+ if interp {
+ errs = append(errs, fmt.Errorf(
+ "Variable '%s': cannot contain interpolations",
+ v.Name))
+ }
+ }
+ }
+ }
+
+ // Check for references to user variables that do not actually
+ // exist and record those errors.
+ for source, vs := range vars {
+ for _, v := range vs {
+ uv, ok := v.(*UserVariable)
+ if !ok {
+ continue
+ }
+
+ if _, ok := varMap[uv.Name]; !ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: unknown variable referenced: '%s'. define it with 'variable' blocks",
+ source,
+ uv.Name))
+ }
+ }
+ }
+
+ // Check that all count variables are valid.
+ for source, vs := range vars {
+ for _, rawV := range vs {
+ switch v := rawV.(type) {
+ case *CountVariable:
+ if v.Type == CountValueInvalid {
+ errs = append(errs, fmt.Errorf(
+ "%s: invalid count variable: %s",
+ source,
+ v.FullKey()))
+ }
+ case *PathVariable:
+ if v.Type == PathValueInvalid {
+ errs = append(errs, fmt.Errorf(
+ "%s: invalid path variable: %s",
+ source,
+ v.FullKey()))
+ }
+ }
+ }
+ }
+
+ // Check that providers aren't declared multiple times.
+ providerSet := make(map[string]struct{})
+ for _, p := range c.ProviderConfigs {
+ name := p.FullName()
+ if _, ok := providerSet[name]; ok {
+ errs = append(errs, fmt.Errorf(
+ "provider.%s: declared multiple times, you can only declare a provider once",
+ name))
+ continue
+ }
+
+ providerSet[name] = struct{}{}
+ }
+
+ // Check that all references to modules are valid
+ modules := make(map[string]*Module)
+ dupped := make(map[string]struct{})
+ for _, m := range c.Modules {
+ // Check for duplicates
+ if _, ok := modules[m.Id()]; ok {
+ if _, ok := dupped[m.Id()]; !ok {
+ dupped[m.Id()] = struct{}{}
+
+ errs = append(errs, fmt.Errorf(
+ "%s: module repeated multiple times",
+ m.Id()))
+ }
+
+ // Already seen this module, just skip it
+ continue
+ }
+
+ modules[m.Id()] = m
+
+ // Check that the source has no interpolations
+ rc, err := NewRawConfig(map[string]interface{}{
+ "root": m.Source,
+ })
+ if err != nil {
+ errs = append(errs, fmt.Errorf(
+ "%s: module source error: %s",
+ m.Id(), err))
+ } else if len(rc.Interpolations) > 0 {
+ errs = append(errs, fmt.Errorf(
+ "%s: module source cannot contain interpolations",
+ m.Id()))
+ }
+
+ // Check that the name matches our regexp
+ if !NameRegexp.Match([]byte(m.Name)) {
+ errs = append(errs, fmt.Errorf(
+ "%s: module name can only contain letters, numbers, "+
+ "dashes, and underscores",
+ m.Id()))
+ }
+
+ // Check that the configuration can all be strings, lists or maps
+ raw := make(map[string]interface{})
+ for k, v := range m.RawConfig.Raw {
+ var strVal string
+ if err := hilmapstructure.WeakDecode(v, &strVal); err == nil {
+ raw[k] = strVal
+ continue
+ }
+
+ var mapVal map[string]interface{}
+ if err := hilmapstructure.WeakDecode(v, &mapVal); err == nil {
+ raw[k] = mapVal
+ continue
+ }
+
+ var sliceVal []interface{}
+ if err := hilmapstructure.WeakDecode(v, &sliceVal); err == nil {
+ raw[k] = sliceVal
+ continue
+ }
+
+ errs = append(errs, fmt.Errorf(
+ "%s: variable %s must be a string, list or map value",
+ m.Id(), k))
+ }
+
+ // Check for invalid count variables
+ for _, v := range m.RawConfig.Variables {
+ switch v.(type) {
+ case *CountVariable:
+ errs = append(errs, fmt.Errorf(
+ "%s: count variables are only valid within resources", m.Name))
+ case *SelfVariable:
+ errs = append(errs, fmt.Errorf(
+ "%s: self variables are only valid within resources", m.Name))
+ }
+ }
+
+ // Update the raw configuration to only contain the string values
+ m.RawConfig, err = NewRawConfig(raw)
+ if err != nil {
+ errs = append(errs, fmt.Errorf(
+ "%s: can't initialize configuration: %s",
+ m.Id(), err))
+ }
+ }
+ dupped = nil
+
+ // Check that all variables for modules reference modules that
+ // exist.
+ for source, vs := range vars {
+ for _, v := range vs {
+ mv, ok := v.(*ModuleVariable)
+ if !ok {
+ continue
+ }
+
+ if _, ok := modules[mv.Name]; !ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: unknown module referenced: %s",
+ source,
+ mv.Name))
+ }
+ }
+ }
+
+ // Check that all references to resources are valid
+ resources := make(map[string]*Resource)
+ dupped = make(map[string]struct{})
+ for _, r := range c.Resources {
+ if _, ok := resources[r.Id()]; ok {
+ if _, ok := dupped[r.Id()]; !ok {
+ dupped[r.Id()] = struct{}{}
+
+ errs = append(errs, fmt.Errorf(
+ "%s: resource repeated multiple times",
+ r.Id()))
+ }
+ }
+
+ resources[r.Id()] = r
+ }
+ dupped = nil
+
+ // Validate resources
+ for n, r := range resources {
+ // Verify count variables
+ for _, v := range r.RawCount.Variables {
+ switch v.(type) {
+ case *CountVariable:
+ errs = append(errs, fmt.Errorf(
+ "%s: resource count can't reference count variable: %s",
+ n,
+ v.FullKey()))
+ case *SimpleVariable:
+ errs = append(errs, fmt.Errorf(
+ "%s: resource count can't reference variable: %s",
+ n,
+ v.FullKey()))
+
+ // Good
+ case *ModuleVariable:
+ case *ResourceVariable:
+ case *TerraformVariable:
+ case *UserVariable:
+
+ default:
+ errs = append(errs, fmt.Errorf(
+ "Internal error. Unknown type in count var in %s: %T",
+ n, v))
+ }
+ }
+
+		// Interpolate with a fixed number to verify that it's a number.
+ r.RawCount.interpolate(func(root ast.Node) (interface{}, error) {
+ // Execute the node but transform the AST so that it returns
+ // a fixed value of "5" for all interpolations.
+ result, err := hil.Eval(
+ hil.FixedValueTransform(
+ root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}),
+ nil)
+ if err != nil {
+ return "", err
+ }
+
+ return result.Value, nil
+ })
+ _, err := strconv.ParseInt(r.RawCount.Value().(string), 0, 0)
+ if err != nil {
+ errs = append(errs, fmt.Errorf(
+ "%s: resource count must be an integer",
+ n))
+ }
+ r.RawCount.init()
+
+ // Validate DependsOn
+ errs = append(errs, c.validateDependsOn(n, r.DependsOn, resources, modules)...)
+
+ // Verify provisioners
+ for _, p := range r.Provisioners {
+			// This validation checks that there are no splat variables
+			// referencing the resource itself, which is currently not allowed.
+
+ for _, v := range p.ConnInfo.Variables {
+ rv, ok := v.(*ResourceVariable)
+ if !ok {
+ continue
+ }
+
+ if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name {
+ errs = append(errs, fmt.Errorf(
+ "%s: connection info cannot contain splat variable "+
+ "referencing itself", n))
+ break
+ }
+ }
+
+ for _, v := range p.RawConfig.Variables {
+ rv, ok := v.(*ResourceVariable)
+ if !ok {
+ continue
+ }
+
+ if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name {
+ errs = append(errs, fmt.Errorf(
+ "%s: connection info cannot contain splat variable "+
+ "referencing itself", n))
+ break
+ }
+ }
+
+			// Check for invalid when/onFailure values; though this should be
+			// picked up by the loader, we check here just in case.
+ if p.When == ProvisionerWhenInvalid {
+ errs = append(errs, fmt.Errorf(
+ "%s: provisioner 'when' value is invalid", n))
+ }
+ if p.OnFailure == ProvisionerOnFailureInvalid {
+ errs = append(errs, fmt.Errorf(
+ "%s: provisioner 'on_failure' value is invalid", n))
+ }
+ }
+
+ // Verify ignore_changes contains valid entries
+ for _, v := range r.Lifecycle.IgnoreChanges {
+ if strings.Contains(v, "*") && v != "*" {
+ errs = append(errs, fmt.Errorf(
+ "%s: ignore_changes does not support using a partial string "+
+ "together with a wildcard: %s", n, v))
+ }
+ }
+
+ // Verify ignore_changes has no interpolations
+ rc, err := NewRawConfig(map[string]interface{}{
+ "root": r.Lifecycle.IgnoreChanges,
+ })
+ if err != nil {
+ errs = append(errs, fmt.Errorf(
+ "%s: lifecycle ignore_changes error: %s",
+ n, err))
+ } else if len(rc.Interpolations) > 0 {
+ errs = append(errs, fmt.Errorf(
+ "%s: lifecycle ignore_changes cannot contain interpolations",
+ n))
+ }
+
+ // If it is a data source then it can't have provisioners
+ if r.Mode == DataResourceMode {
+ if _, ok := r.RawConfig.Raw["provisioner"]; ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: data sources cannot have provisioners",
+ n))
+ }
+ }
+ }
+
+ for source, vs := range vars {
+ for _, v := range vs {
+ rv, ok := v.(*ResourceVariable)
+ if !ok {
+ continue
+ }
+
+ id := rv.ResourceId()
+ if _, ok := resources[id]; !ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: unknown resource '%s' referenced in variable %s",
+ source,
+ id,
+ rv.FullKey()))
+ continue
+ }
+ }
+ }
+
+ // Check that all outputs are valid
+ {
+ found := make(map[string]struct{})
+ for _, o := range c.Outputs {
+ // Verify the output is new
+ if _, ok := found[o.Name]; ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: duplicate output. output names must be unique.",
+ o.Name))
+ continue
+ }
+ found[o.Name] = struct{}{}
+
+ var invalidKeys []string
+ valueKeyFound := false
+ for k := range o.RawConfig.Raw {
+ if k == "value" {
+ valueKeyFound = true
+ continue
+ }
+ if k == "sensitive" {
+ if sensitive, ok := o.RawConfig.config[k].(bool); ok {
+ if sensitive {
+ o.Sensitive = true
+ }
+ continue
+ }
+
+ errs = append(errs, fmt.Errorf(
+ "%s: value for 'sensitive' must be boolean",
+ o.Name))
+ continue
+ }
+ if k == "description" {
+ if desc, ok := o.RawConfig.config[k].(string); ok {
+ o.Description = desc
+ continue
+ }
+
+ errs = append(errs, fmt.Errorf(
+ "%s: value for 'description' must be string",
+ o.Name))
+ continue
+ }
+ invalidKeys = append(invalidKeys, k)
+ }
+ if len(invalidKeys) > 0 {
+ errs = append(errs, fmt.Errorf(
+ "%s: output has invalid keys: %s",
+ o.Name, strings.Join(invalidKeys, ", ")))
+ }
+ if !valueKeyFound {
+ errs = append(errs, fmt.Errorf(
+ "%s: output is missing required 'value' key", o.Name))
+ }
+
+ for _, v := range o.RawConfig.Variables {
+ if _, ok := v.(*CountVariable); ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: count variables are only valid within resources", o.Name))
+ }
+ }
+ }
+ }
+
+ // Check that all variables are in the proper context
+ for source, rc := range c.rawConfigs() {
+ walker := &interpolationWalker{
+ ContextF: c.validateVarContextFn(source, &errs),
+ }
+ if err := reflectwalk.Walk(rc.Raw, walker); err != nil {
+ errs = append(errs, fmt.Errorf(
+ "%s: error reading config: %s", source, err))
+ }
+ }
+
+ // Validate the self variable
+ for source, rc := range c.rawConfigs() {
+ // Ignore provisioners. This is a pretty brittle way to do this,
+ // but better than also repeating all the resources.
+ if strings.Contains(source, "provision") {
+ continue
+ }
+
+ for _, v := range rc.Variables {
+ if _, ok := v.(*SelfVariable); ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: cannot contain self-reference %s", source, v.FullKey()))
+ }
+ }
+ }
+
+ if len(errs) > 0 {
+ return &multierror.Error{Errors: errs}
+ }
+
+ return nil
+}
+
+// InterpolatedVariables is a helper that returns a mapping of all the interpolated
+// variables within the configuration. This is used to verify references
+// are valid in the Validate step.
+func (c *Config) InterpolatedVariables() map[string][]InterpolatedVariable {
+ result := make(map[string][]InterpolatedVariable)
+ for source, rc := range c.rawConfigs() {
+ for _, v := range rc.Variables {
+ result[source] = append(result[source], v)
+ }
+ }
+ return result
+}
+
+// rawConfigs returns all of the RawConfigs that are available keyed by
+// a human-friendly source.
+func (c *Config) rawConfigs() map[string]*RawConfig {
+ result := make(map[string]*RawConfig)
+ for _, m := range c.Modules {
+ source := fmt.Sprintf("module '%s'", m.Name)
+ result[source] = m.RawConfig
+ }
+
+ for _, pc := range c.ProviderConfigs {
+ source := fmt.Sprintf("provider config '%s'", pc.Name)
+ result[source] = pc.RawConfig
+ }
+
+ for _, rc := range c.Resources {
+ source := fmt.Sprintf("resource '%s'", rc.Id())
+ result[source+" count"] = rc.RawCount
+ result[source+" config"] = rc.RawConfig
+
+ for i, p := range rc.Provisioners {
+ subsource := fmt.Sprintf(
+ "%s provisioner %s (#%d)",
+ source, p.Type, i+1)
+ result[subsource] = p.RawConfig
+ }
+ }
+
+ for _, o := range c.Outputs {
+ source := fmt.Sprintf("output '%s'", o.Name)
+ result[source] = o.RawConfig
+ }
+
+ return result
+}
+
+func (c *Config) validateVarContextFn(
+ source string, errs *[]error) interpolationWalkerContextFunc {
+ return func(loc reflectwalk.Location, node ast.Node) {
+ // If we're in a slice element, then it's fine, since you can do
+ // anything in there.
+ if loc == reflectwalk.SliceElem {
+ return
+ }
+
+ // Otherwise, let's check if there is a splat resource variable
+ // at the top level in here. We do this with a transform that
+ // replaces everything with a noop node unless it's a variable
+ // access or concat. This should turn the AST into a flat tree
+ // of Concat(Noop, ...). If there are any variables left that are
+ // multi-access, then it's still broken.
+ node = node.Accept(func(n ast.Node) ast.Node {
+ // If it is a concat or variable access, we allow it.
+ switch n.(type) {
+ case *ast.Output:
+ return n
+ case *ast.VariableAccess:
+ return n
+ }
+
+ // Otherwise, noop
+ return &noopNode{}
+ })
+
+ vars, err := DetectVariables(node)
+ if err != nil {
+ // Ignore it since this will be caught during parse. This
+ // probably should never happen by the time this is called,
+ // but it's harmless to ignore here.
+ return
+ }
+
+ for _, v := range vars {
+ rv, ok := v.(*ResourceVariable)
+ if !ok {
+ return
+ }
+
+ if rv.Multi && rv.Index == -1 {
+ *errs = append(*errs, fmt.Errorf(
+ "%s: use of the splat ('*') operator must be wrapped in a list declaration",
+ source))
+ }
+ }
+ }
+}
+
+func (c *Config) validateDependsOn(
+ n string,
+ v []string,
+ resources map[string]*Resource,
+ modules map[string]*Module) []error {
+ // Verify that each depends_on entry points to a resource or module that exists
+ var errs []error
+ for _, d := range v {
+ // Check whether the value contains interpolations
+ rc, err := NewRawConfig(map[string]interface{}{
+ "value": d,
+ })
+ if err == nil && len(rc.Variables) > 0 {
+ errs = append(errs, fmt.Errorf(
+ "%s: depends on value cannot contain interpolations: %s",
+ n, d))
+ continue
+ }
+
+ // If it looks like a module reference, verify that the module exists
+ if strings.HasPrefix(d, "module.") {
+ name := d[len("module."):]
+ if _, ok := modules[name]; !ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: resource depends on non-existent module '%s'",
+ n, name))
+ }
+
+ continue
+ }
+
+ // Check resources
+ if _, ok := resources[d]; !ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: resource depends on non-existent resource '%s'",
+ n, d))
+ }
+ }
+
+ return errs
+}
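+
+// Illustrative examples (editor's sketch, not part of the upstream source):
+// given a config that declares resource "aws_instance" "web" and module
+// "network", these depends_on entries would pass the checks above:
+//
+//    depends_on = ["aws_instance.web", "module.network"]
+//
+// while an interpolated entry such as "${aws_instance.web.id}" is rejected.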
+
+func (m *Module) mergerName() string {
+ return m.Id()
+}
+
+func (m *Module) mergerMerge(other merger) merger {
+ m2 := other.(*Module)
+
+ result := *m
+ result.Name = m2.Name
+ result.RawConfig = result.RawConfig.merge(m2.RawConfig)
+
+ if m2.Source != "" {
+ result.Source = m2.Source
+ }
+
+ return &result
+}
+
+func (o *Output) mergerName() string {
+ return o.Name
+}
+
+func (o *Output) mergerMerge(m merger) merger {
+ o2 := m.(*Output)
+
+ result := *o
+ result.Name = o2.Name
+ result.Description = o2.Description
+ result.RawConfig = result.RawConfig.merge(o2.RawConfig)
+ result.Sensitive = o2.Sensitive
+ result.DependsOn = o2.DependsOn
+
+ return &result
+}
+
+func (c *ProviderConfig) GoString() string {
+ return fmt.Sprintf("*%#v", *c)
+}
+
+func (c *ProviderConfig) FullName() string {
+ if c.Alias == "" {
+ return c.Name
+ }
+
+ return fmt.Sprintf("%s.%s", c.Name, c.Alias)
+}
+
+func (c *ProviderConfig) mergerName() string {
+ return c.Name
+}
+
+func (c *ProviderConfig) mergerMerge(m merger) merger {
+ c2 := m.(*ProviderConfig)
+
+ result := *c
+ result.Name = c2.Name
+ result.RawConfig = result.RawConfig.merge(c2.RawConfig)
+
+ if c2.Alias != "" {
+ result.Alias = c2.Alias
+ }
+
+ return &result
+}
+
+func (r *Resource) mergerName() string {
+ return r.Id()
+}
+
+func (r *Resource) mergerMerge(m merger) merger {
+ r2 := m.(*Resource)
+
+ result := *r
+ result.Mode = r2.Mode
+ result.Name = r2.Name
+ result.Type = r2.Type
+ result.RawConfig = result.RawConfig.merge(r2.RawConfig)
+
+ if r2.RawCount.Value() != "1" {
+ result.RawCount = r2.RawCount
+ }
+
+ if len(r2.Provisioners) > 0 {
+ result.Provisioners = r2.Provisioners
+ }
+
+ return &result
+}
+
+// Merge merges two variables to create a new third variable.
+func (v *Variable) Merge(v2 *Variable) *Variable {
+ // Shallow copy the variable
+ result := *v
+
+ // The names should be the same, but the second name always wins.
+ result.Name = v2.Name
+
+ if v2.DeclaredType != "" {
+ result.DeclaredType = v2.DeclaredType
+ }
+ if v2.Default != nil {
+ result.Default = v2.Default
+ }
+ if v2.Description != "" {
+ result.Description = v2.Description
+ }
+
+ return &result
+}
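+
+// Example (illustrative, not part of the upstream source): merging an
+// override into a base variable keeps any base fields the override leaves
+// empty.
+//
+//    base := &Variable{Name: "region", Default: "us-east-1"}
+//    override := &Variable{Name: "region", Description: "AWS region"}
+//    merged := base.Merge(override)
+//    // merged.Default == "us-east-1", merged.Description == "AWS region"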
+
+var typeStringMap = map[string]VariableType{
+ "string": VariableTypeString,
+ "map": VariableTypeMap,
+ "list": VariableTypeList,
+}
+
+// Type returns the type of variable this is.
+func (v *Variable) Type() VariableType {
+ if v.DeclaredType != "" {
+ declaredType, ok := typeStringMap[v.DeclaredType]
+ if !ok {
+ return VariableTypeUnknown
+ }
+
+ return declaredType
+ }
+
+ return v.inferTypeFromDefault()
+}
+
+// ValidateTypeAndDefault ensures that default variable value is compatible
+// with the declared type (if one exists), and that the type is one which is
+// known to Terraform
+func (v *Variable) ValidateTypeAndDefault() error {
+ // If an explicit type is declared, ensure it is valid
+ if v.DeclaredType != "" {
+ if _, ok := typeStringMap[v.DeclaredType]; !ok {
+ validTypes := []string{}
+ for k := range typeStringMap {
+ validTypes = append(validTypes, k)
+ }
+ return fmt.Errorf(
+ "Variable '%s' type must be one of [%s] - '%s' is not a valid type",
+ v.Name,
+ strings.Join(validTypes, ", "),
+ v.DeclaredType,
+ )
+ }
+ }
+
+ if v.DeclaredType == "" || v.Default == nil {
+ return nil
+ }
+
+ if v.inferTypeFromDefault() != v.Type() {
+ return fmt.Errorf("'%s' has a default value which is not of type '%s' (got '%s')",
+ v.Name, v.DeclaredType, v.inferTypeFromDefault().Printable())
+ }
+
+ return nil
+}
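+
+// Example (illustrative, not part of the upstream source): a declared type
+// that disagrees with the default's inferred type fails validation.
+//
+//    v := &Variable{
+//        Name:         "tags",
+//        DeclaredType: "string",
+//        Default:      map[string]interface{}{"env": "dev"},
+//    }
+//    err := v.ValidateTypeAndDefault() // non-nil: default is a map, not a string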
+
+func (v *Variable) mergerName() string {
+ return v.Name
+}
+
+func (v *Variable) mergerMerge(m merger) merger {
+ return v.Merge(m.(*Variable))
+}
+
+// Required tests whether a variable is required or not.
+func (v *Variable) Required() bool {
+ return v.Default == nil
+}
+
+// inferTypeFromDefault contains the logic for the old method of inferring
+// variable types - we can also use this for validating that the declared
+// type matches the type of the default value
+func (v *Variable) inferTypeFromDefault() VariableType {
+ if v.Default == nil {
+ return VariableTypeString
+ }
+
+ var s string
+ if err := hilmapstructure.WeakDecode(v.Default, &s); err == nil {
+ v.Default = s
+ return VariableTypeString
+ }
+
+ var m map[string]interface{}
+ if err := hilmapstructure.WeakDecode(v.Default, &m); err == nil {
+ v.Default = m
+ return VariableTypeMap
+ }
+
+ var l []interface{}
+ if err := hilmapstructure.WeakDecode(v.Default, &l); err == nil {
+ v.Default = l
+ return VariableTypeList
+ }
+
+ return VariableTypeUnknown
+}
+
+func (m ResourceMode) Taintable() bool {
+ switch m {
+ case ManagedResourceMode:
+ return true
+ case DataResourceMode:
+ return false
+ default:
+ panic(fmt.Errorf("unsupported ResourceMode value %s", m))
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/config_string.go b/vendor/github.com/hashicorp/terraform/config/config_string.go
new file mode 100644
index 00000000..0b3abbcd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config_string.go
@@ -0,0 +1,338 @@
+package config
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// TestString is a Stringer-like function that outputs a string that can
+// be used to easily compare multiple Config structures in unit tests.
+//
+// This function has no practical use outside of unit tests and debugging.
+func (c *Config) TestString() string {
+ if c == nil {
+ return "<nil config>"
+ }
+
+ var buf bytes.Buffer
+ if len(c.Modules) > 0 {
+ buf.WriteString("Modules:\n\n")
+ buf.WriteString(modulesStr(c.Modules))
+ buf.WriteString("\n\n")
+ }
+
+ if len(c.Variables) > 0 {
+ buf.WriteString("Variables:\n\n")
+ buf.WriteString(variablesStr(c.Variables))
+ buf.WriteString("\n\n")
+ }
+
+ if len(c.ProviderConfigs) > 0 {
+ buf.WriteString("Provider Configs:\n\n")
+ buf.WriteString(providerConfigsStr(c.ProviderConfigs))
+ buf.WriteString("\n\n")
+ }
+
+ if len(c.Resources) > 0 {
+ buf.WriteString("Resources:\n\n")
+ buf.WriteString(resourcesStr(c.Resources))
+ buf.WriteString("\n\n")
+ }
+
+ if len(c.Outputs) > 0 {
+ buf.WriteString("Outputs:\n\n")
+ buf.WriteString(outputsStr(c.Outputs))
+ buf.WriteString("\n")
+ }
+
+ return strings.TrimSpace(buf.String())
+}
+
+func terraformStr(t *Terraform) string {
+ result := ""
+
+ if b := t.Backend; b != nil {
+ result += fmt.Sprintf("backend (%s)\n", b.Type)
+
+ keys := make([]string, 0, len(b.RawConfig.Raw))
+ for k := range b.RawConfig.Raw {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ result += fmt.Sprintf(" %s\n", k)
+ }
+ }
+
+ return strings.TrimSpace(result)
+}
+
+func modulesStr(ms []*Module) string {
+ result := ""
+ order := make([]int, 0, len(ms))
+ ks := make([]string, 0, len(ms))
+ mapping := make(map[string]int)
+ for i, m := range ms {
+ k := m.Id()
+ ks = append(ks, k)
+ mapping[k] = i
+ }
+ sort.Strings(ks)
+ for _, k := range ks {
+ order = append(order, mapping[k])
+ }
+
+ for _, i := range order {
+ m := ms[i]
+ result += fmt.Sprintf("%s\n", m.Id())
+
+ ks := make([]string, 0, len(m.RawConfig.Raw))
+ for k := range m.RawConfig.Raw {
+ ks = append(ks, k)
+ }
+ sort.Strings(ks)
+
+ result += fmt.Sprintf(" source = %s\n", m.Source)
+
+ for _, k := range ks {
+ result += fmt.Sprintf(" %s\n", k)
+ }
+ }
+
+ return strings.TrimSpace(result)
+}
+
+func outputsStr(os []*Output) string {
+ ns := make([]string, 0, len(os))
+ m := make(map[string]*Output)
+ for _, o := range os {
+ ns = append(ns, o.Name)
+ m[o.Name] = o
+ }
+ sort.Strings(ns)
+
+ result := ""
+ for _, n := range ns {
+ o := m[n]
+
+ result += fmt.Sprintf("%s\n", n)
+
+ if len(o.DependsOn) > 0 {
+ result += fmt.Sprintf(" dependsOn\n")
+ for _, d := range o.DependsOn {
+ result += fmt.Sprintf(" %s\n", d)
+ }
+ }
+
+ if len(o.RawConfig.Variables) > 0 {
+ result += fmt.Sprintf(" vars\n")
+ for _, rawV := range o.RawConfig.Variables {
+ kind := "unknown"
+ str := rawV.FullKey()
+
+ switch rawV.(type) {
+ case *ResourceVariable:
+ kind = "resource"
+ case *UserVariable:
+ kind = "user"
+ }
+
+ result += fmt.Sprintf(" %s: %s\n", kind, str)
+ }
+ }
+ }
+
+ return strings.TrimSpace(result)
+}
+
+// This helper turns a list of provider configs into a deterministic
+// string value for comparison in tests.
+func providerConfigsStr(pcs []*ProviderConfig) string {
+ result := ""
+
+ ns := make([]string, 0, len(pcs))
+ m := make(map[string]*ProviderConfig)
+ for _, n := range pcs {
+ ns = append(ns, n.Name)
+ m[n.Name] = n
+ }
+ sort.Strings(ns)
+
+ for _, n := range ns {
+ pc := m[n]
+
+ result += fmt.Sprintf("%s\n", n)
+
+ keys := make([]string, 0, len(pc.RawConfig.Raw))
+ for k := range pc.RawConfig.Raw {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ result += fmt.Sprintf(" %s\n", k)
+ }
+
+ if len(pc.RawConfig.Variables) > 0 {
+ result += fmt.Sprintf(" vars\n")
+ for _, rawV := range pc.RawConfig.Variables {
+ kind := "unknown"
+ str := rawV.FullKey()
+
+ switch rawV.(type) {
+ case *ResourceVariable:
+ kind = "resource"
+ case *UserVariable:
+ kind = "user"
+ }
+
+ result += fmt.Sprintf(" %s: %s\n", kind, str)
+ }
+ }
+ }
+
+ return strings.TrimSpace(result)
+}
+
+// This helper turns a list of resources into a deterministic
+// string value for comparison in tests.
+func resourcesStr(rs []*Resource) string {
+ result := ""
+ order := make([]int, 0, len(rs))
+ ks := make([]string, 0, len(rs))
+ mapping := make(map[string]int)
+ for i, r := range rs {
+ k := r.Id()
+ ks = append(ks, k)
+ mapping[k] = i
+ }
+ sort.Strings(ks)
+ for _, k := range ks {
+ order = append(order, mapping[k])
+ }
+
+ for _, i := range order {
+ r := rs[i]
+ result += fmt.Sprintf(
+ "%s (x%s)\n",
+ r.Id(),
+ r.RawCount.Value())
+
+ ks := make([]string, 0, len(r.RawConfig.Raw))
+ for k := range r.RawConfig.Raw {
+ ks = append(ks, k)
+ }
+ sort.Strings(ks)
+
+ for _, k := range ks {
+ result += fmt.Sprintf(" %s\n", k)
+ }
+
+ if len(r.Provisioners) > 0 {
+ result += fmt.Sprintf(" provisioners\n")
+ for _, p := range r.Provisioners {
+ when := ""
+ if p.When != ProvisionerWhenCreate {
+ when = fmt.Sprintf(" (%s)", p.When.String())
+ }
+
+ result += fmt.Sprintf(" %s%s\n", p.Type, when)
+
+ if p.OnFailure != ProvisionerOnFailureFail {
+ result += fmt.Sprintf(" on_failure = %s\n", p.OnFailure.String())
+ }
+
+ ks := make([]string, 0, len(p.RawConfig.Raw))
+ for k := range p.RawConfig.Raw {
+ ks = append(ks, k)
+ }
+ sort.Strings(ks)
+
+ for _, k := range ks {
+ result += fmt.Sprintf(" %s\n", k)
+ }
+ }
+ }
+
+ if len(r.DependsOn) > 0 {
+ result += fmt.Sprintf(" dependsOn\n")
+ for _, d := range r.DependsOn {
+ result += fmt.Sprintf(" %s\n", d)
+ }
+ }
+
+ if len(r.RawConfig.Variables) > 0 {
+ result += fmt.Sprintf(" vars\n")
+
+ ks := make([]string, 0, len(r.RawConfig.Variables))
+ for k := range r.RawConfig.Variables {
+ ks = append(ks, k)
+ }
+ sort.Strings(ks)
+
+ for _, k := range ks {
+ rawV := r.RawConfig.Variables[k]
+ kind := "unknown"
+ str := rawV.FullKey()
+
+ switch rawV.(type) {
+ case *ResourceVariable:
+ kind = "resource"
+ case *UserVariable:
+ kind = "user"
+ }
+
+ result += fmt.Sprintf(" %s: %s\n", kind, str)
+ }
+ }
+ }
+
+ return strings.TrimSpace(result)
+}
+
+// This helper turns a list of variables into a deterministic
+// string value for comparison in tests.
+func variablesStr(vs []*Variable) string {
+ result := ""
+ ks := make([]string, 0, len(vs))
+ m := make(map[string]*Variable)
+ for _, v := range vs {
+ ks = append(ks, v.Name)
+ m[v.Name] = v
+ }
+ sort.Strings(ks)
+
+ for _, k := range ks {
+ v := m[k]
+
+ required := ""
+ if v.Required() {
+ required = " (required)"
+ }
+
+ declaredType := ""
+ if v.DeclaredType != "" {
+ declaredType = fmt.Sprintf(" (%s)", v.DeclaredType)
+ }
+
+ if v.Default == nil || v.Default == "" {
+ v.Default = "<>"
+ }
+ if v.Description == "" {
+ v.Description = "<>"
+ }
+
+ result += fmt.Sprintf(
+ "%s%s%s\n %v\n %s\n",
+ k,
+ required,
+ declaredType,
+ v.Default,
+ v.Description)
+ }
+
+ return strings.TrimSpace(result)
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/config_terraform.go b/vendor/github.com/hashicorp/terraform/config/config_terraform.go
new file mode 100644
index 00000000..8535c964
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config_terraform.go
@@ -0,0 +1,117 @@
+package config
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/go-version"
+ "github.com/mitchellh/hashstructure"
+)
+
+// Terraform is the Terraform meta-configuration that can be present
+// in configuration files for configuring Terraform itself.
+type Terraform struct {
+ RequiredVersion string `hcl:"required_version"` // Required Terraform version (constraint)
+ Backend *Backend // See Backend struct docs
+}
+
+// Validate performs the validation for just the Terraform configuration.
+func (t *Terraform) Validate() []error {
+ var errs []error
+
+ if raw := t.RequiredVersion; raw != "" {
+ // Check that the value has no interpolations
+ rc, err := NewRawConfig(map[string]interface{}{
+ "root": raw,
+ })
+ if err != nil {
+ errs = append(errs, fmt.Errorf(
+ "terraform.required_version: %s", err))
+ } else if len(rc.Interpolations) > 0 {
+ errs = append(errs, fmt.Errorf(
+ "terraform.required_version: cannot contain interpolations"))
+ } else {
+ // Check it is valid
+ _, err := version.NewConstraint(raw)
+ if err != nil {
+ errs = append(errs, fmt.Errorf(
+ "terraform.required_version: invalid syntax: %s", err))
+ }
+ }
+ }
+
+ if t.Backend != nil {
+ errs = append(errs, t.Backend.Validate()...)
+ }
+
+ return errs
+}
+
+// Merge t with t2.
+// Any conflicting fields are overwritten by t2.
+func (t *Terraform) Merge(t2 *Terraform) {
+ if t2.RequiredVersion != "" {
+ t.RequiredVersion = t2.RequiredVersion
+ }
+
+ if t2.Backend != nil {
+ t.Backend = t2.Backend
+ }
+}
+
+// Backend is the configuration for the "backend" to use with Terraform.
+// A backend is responsible for all major behavior of Terraform's core.
+// The abstraction layer above the core (the "backend") allows for behavior
+// such as remote operation.
+type Backend struct {
+ Type string
+ RawConfig *RawConfig
+
+ // Hash is a unique hash code representing the original configuration
+ // of the backend. This won't be recomputed unless Rehash is called.
+ Hash uint64
+}
+
+// Rehash returns a unique content hash for this backend's configuration
+// as a uint64 value.
+func (b *Backend) Rehash() uint64 {
+ // If we have no backend, the value is zero
+ if b == nil {
+ return 0
+ }
+
+ // Use hashstructure to hash only our type with the config.
+ code, err := hashstructure.Hash(map[string]interface{}{
+ "type": b.Type,
+ "config": b.RawConfig.Raw,
+ }, nil)
+
+ // This should never happen since we have just some basic primitives,
+ // so panic if there is an error.
+ if err != nil {
+ panic(err)
+ }
+
+ return code
+}
+
+func (b *Backend) Validate() []error {
+ if len(b.RawConfig.Interpolations) > 0 {
+ return []error{fmt.Errorf(strings.TrimSpace(errBackendInterpolations))}
+ }
+
+ return nil
+}
+
+const errBackendInterpolations = `
+terraform.backend: configuration cannot contain interpolations
+
+The backend configuration is loaded by Terraform extremely early, before
+the core of Terraform can be initialized. This is necessary because the backend
+dictates the behavior of that core. The core is what handles interpolation
+processing. Because of this, interpolations cannot be used in backend
+configuration.
+
+If you'd like to parameterize backend configuration, we recommend using
+partial configuration with the "-backend-config" flag to "terraform init".
+`
diff --git a/vendor/github.com/hashicorp/terraform/config/config_tree.go b/vendor/github.com/hashicorp/terraform/config/config_tree.go
new file mode 100644
index 00000000..08dc0fe9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config_tree.go
@@ -0,0 +1,43 @@
+package config
+
+// configTree represents a tree of configurations where the root is the
+// first file and its children are the configurations it has imported.
+type configTree struct {
+ Path string
+ Config *Config
+ Children []*configTree
+}
+
+// Flatten flattens the entire tree down to a single merged Config
+// structure.
+func (t *configTree) Flatten() (*Config, error) {
+ // No children is easy: we're already merged!
+ if len(t.Children) == 0 {
+ return t.Config, nil
+ }
+
+ // Depth-first, merge all the children first.
+ childConfigs := make([]*Config, len(t.Children))
+ for i, ct := range t.Children {
+ c, err := ct.Flatten()
+ if err != nil {
+ return nil, err
+ }
+
+ childConfigs[i] = c
+ }
+
+ // Merge all the children in order
+ config := childConfigs[0]
+ childConfigs = childConfigs[1:]
+ for _, config2 := range childConfigs {
+ var err error
+ config, err = Merge(config, config2)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Merge the final merged child config with our own
+ return Merge(config, t.Config)
+}
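+
+// Illustrative sketch (assumed names, not part of the upstream source): for
+// a root config with two leaf children a and b, Flatten merges depth-first,
+// so later children and finally the root win on conflicting fields.
+//
+//    merged, err := (&configTree{
+//        Config:   rootConfig,
+//        Children: []*configTree{{Config: a}, {Config: b}},
+//    }).Flatten()
+//    // merged is equivalent to Merge(Merge(a, b), rootConfig)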
diff --git a/vendor/github.com/hashicorp/terraform/config/import_tree.go b/vendor/github.com/hashicorp/terraform/config/import_tree.go
new file mode 100644
index 00000000..37ec11a1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/import_tree.go
@@ -0,0 +1,113 @@
+package config
+
+import (
+ "fmt"
+ "io"
+)
+
+// configurable is an interface that must be implemented by any configuration
+// format of Terraform in order to return a *Config.
+type configurable interface {
+ Config() (*Config, error)
+}
+
+// importTree is the result of the first-pass load of the configuration
+// files. It is a tree of raw configurables and then any children (their
+// imports).
+//
+// An importTree can be turned into a configTree.
+type importTree struct {
+ Path string
+ Raw configurable
+ Children []*importTree
+}
+
+// This is the function type that must be implemented by the configuration
+// file loader to turn a single file into a configurable and any additional
+// imports.
+type fileLoaderFunc func(path string) (configurable, []string, error)
+
+// loadTree takes a single file and loads the entire importTree for that
+// file. This function detects what kind of configuration file it is and
+// executes the proper fileLoaderFunc.
+func loadTree(root string) (*importTree, error) {
+ var f fileLoaderFunc
+ switch ext(root) {
+ case ".tf", ".tf.json":
+ f = loadFileHcl
+ default:
+ }
+
+ if f == nil {
+ return nil, fmt.Errorf(
+ "%s: unknown configuration format. Use '.tf' or '.tf.json' extension",
+ root)
+ }
+
+ c, imps, err := f(root)
+ if err != nil {
+ return nil, err
+ }
+
+ children := make([]*importTree, len(imps))
+ for i, imp := range imps {
+ t, err := loadTree(imp)
+ if err != nil {
+ return nil, err
+ }
+
+ children[i] = t
+ }
+
+ return &importTree{
+ Path: root,
+ Raw: c,
+ Children: children,
+ }, nil
+}
+
+// Close releases any resources we might be holding open for the importTree.
+//
+// This can safely be called even while ConfigTree results are alive. The
+// importTree is not bound to these.
+func (t *importTree) Close() error {
+ if c, ok := t.Raw.(io.Closer); ok {
+ c.Close()
+ }
+ for _, ct := range t.Children {
+ ct.Close()
+ }
+
+ return nil
+}
+
+// ConfigTree traverses the importTree and turns each node into a *Config
+// object, ultimately returning a *configTree.
+func (t *importTree) ConfigTree() (*configTree, error) {
+ config, err := t.Raw.Config()
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error loading %s: %s",
+ t.Path,
+ err)
+ }
+
+ // Build our result
+ result := &configTree{
+ Path: t.Path,
+ Config: config,
+ }
+
+ // Build the config trees for the children
+ result.Children = make([]*configTree, len(t.Children))
+ for i, ct := range t.Children {
+ t, err := ct.ConfigTree()
+ if err != nil {
+ return nil, err
+ }
+
+ result.Children[i] = t
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate.go b/vendor/github.com/hashicorp/terraform/config/interpolate.go
new file mode 100644
index 00000000..bbb35554
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate.go
@@ -0,0 +1,386 @@
+package config
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+// An InterpolatedVariable is a variable reference within an interpolation.
+//
+// Implementations of this interface represent the various sources where
+// variables can come from: user variables, resources, etc.
+type InterpolatedVariable interface {
+ FullKey() string
+}
+
+// CountVariable is a variable for referencing information about
+// the count.
+type CountVariable struct {
+ Type CountValueType
+ key string
+}
+
+// CountValueType is the type of the count variable that is referenced.
+type CountValueType byte
+
+const (
+ CountValueInvalid CountValueType = iota
+ CountValueIndex
+)
+
+// A ModuleVariable is a variable that is referencing the output
+// of a module, such as "${module.foo.bar}"
+type ModuleVariable struct {
+ Name string
+ Field string
+ key string
+}
+
+// A PathVariable is a variable that references path information about the
+// module.
+type PathVariable struct {
+ Type PathValueType
+ key string
+}
+
+type PathValueType byte
+
+const (
+ PathValueInvalid PathValueType = iota
+ PathValueCwd
+ PathValueModule
+ PathValueRoot
+)
+
+// A ResourceVariable is a variable that is referencing the field
+// of a resource, such as "${aws_instance.foo.ami}"
+type ResourceVariable struct {
+ Mode ResourceMode
+ Type string // Resource type, i.e. "aws_instance"
+ Name string // Resource name
+ Field string // Resource field
+
+ Multi bool // True if multi-variable: aws_instance.foo.*.id
+ Index int // Index for multi-variable: aws_instance.foo.1.id == 1
+
+ key string
+}
+
+// SelfVariable is a variable that is referencing the same resource
+// it is running on: "${self.address}"
+type SelfVariable struct {
+ Field string
+
+ key string
+}
+
+// SimpleVariable is an unprefixed variable, which can show up when users have
+// strings they are passing down to resources that use interpolation
+// internally. The template_file resource is an example of this.
+type SimpleVariable struct {
+ Key string
+}
+
+// TerraformVariable is a "terraform."-prefixed variable used to access
+// metadata about the Terraform run.
+type TerraformVariable struct {
+ Field string
+ key string
+}
+
+// A UserVariable is a variable that is referencing a user variable
+// that is supplied from outside the configuration. This looks like
+// "${var.foo}"
+type UserVariable struct {
+ Name string
+ Elem string
+
+ key string
+}
+
+func NewInterpolatedVariable(v string) (InterpolatedVariable, error) {
+ if strings.HasPrefix(v, "count.") {
+ return NewCountVariable(v)
+ } else if strings.HasPrefix(v, "path.") {
+ return NewPathVariable(v)
+ } else if strings.HasPrefix(v, "self.") {
+ return NewSelfVariable(v)
+ } else if strings.HasPrefix(v, "terraform.") {
+ return NewTerraformVariable(v)
+ } else if strings.HasPrefix(v, "var.") {
+ return NewUserVariable(v)
+ } else if strings.HasPrefix(v, "module.") {
+ return NewModuleVariable(v)
+ } else if !strings.ContainsRune(v, '.') {
+ return NewSimpleVariable(v)
+ } else {
+ return NewResourceVariable(v)
+ }
+}
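+
+// Illustrative dispatch examples (derived from the prefix checks above):
+//
+//    NewInterpolatedVariable("var.foo")             // *UserVariable
+//    NewInterpolatedVariable("count.index")         // *CountVariable
+//    NewInterpolatedVariable("module.net.cidr")     // *ModuleVariable
+//    NewInterpolatedVariable("self.private_ip")     // *SelfVariable
+//    NewInterpolatedVariable("aws_instance.web.id") // *ResourceVariable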
+
+func NewCountVariable(key string) (*CountVariable, error) {
+ var fieldType CountValueType
+ parts := strings.SplitN(key, ".", 2)
+ switch parts[1] {
+ case "index":
+ fieldType = CountValueIndex
+ }
+
+ return &CountVariable{
+ Type: fieldType,
+ key: key,
+ }, nil
+}
+
+func (c *CountVariable) FullKey() string {
+ return c.key
+}
+
+func NewModuleVariable(key string) (*ModuleVariable, error) {
+ parts := strings.SplitN(key, ".", 3)
+ if len(parts) < 3 {
+ return nil, fmt.Errorf(
+ "%s: module variables must be three parts: module.name.attr",
+ key)
+ }
+
+ return &ModuleVariable{
+ Name: parts[1],
+ Field: parts[2],
+ key: key,
+ }, nil
+}
+
+func (v *ModuleVariable) FullKey() string {
+ return v.key
+}
+
+func (v *ModuleVariable) GoString() string {
+ return fmt.Sprintf("*%#v", *v)
+}
+
+func NewPathVariable(key string) (*PathVariable, error) {
+ var fieldType PathValueType
+ parts := strings.SplitN(key, ".", 2)
+ switch parts[1] {
+ case "cwd":
+ fieldType = PathValueCwd
+ case "module":
+ fieldType = PathValueModule
+ case "root":
+ fieldType = PathValueRoot
+ }
+
+ return &PathVariable{
+ Type: fieldType,
+ key: key,
+ }, nil
+}
+
+func (v *PathVariable) FullKey() string {
+ return v.key
+}
+
+func NewResourceVariable(key string) (*ResourceVariable, error) {
+ var mode ResourceMode
+ var parts []string
+ if strings.HasPrefix(key, "data.") {
+ mode = DataResourceMode
+ parts = strings.SplitN(key, ".", 4)
+ if len(parts) < 4 {
+ return nil, fmt.Errorf(
+ "%s: data variables must be four parts: data.TYPE.NAME.ATTR",
+ key)
+ }
+
+ // Don't actually need the "data." prefix for parsing, since it's
+ // always constant.
+ parts = parts[1:]
+ } else {
+ mode = ManagedResourceMode
+ parts = strings.SplitN(key, ".", 3)
+ if len(parts) < 3 {
+ return nil, fmt.Errorf(
+ "%s: resource variables must be three parts: TYPE.NAME.ATTR",
+ key)
+ }
+ }
+
+ field := parts[2]
+ multi := false
+ var index int
+
+ if idx := strings.Index(field, "."); idx != -1 {
+ indexStr := field[:idx]
+ multi = indexStr == "*"
+ index = -1
+
+ if !multi {
+ indexInt, err := strconv.ParseInt(indexStr, 0, 0)
+ if err == nil {
+ multi = true
+ index = int(indexInt)
+ }
+ }
+
+ if multi {
+ field = field[idx+1:]
+ }
+ }
+
+ return &ResourceVariable{
+ Mode: mode,
+ Type: parts[0],
+ Name: parts[1],
+ Field: field,
+ Multi: multi,
+ Index: index,
+ key: key,
+ }, nil
+}
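+
+// Parsing examples (illustrative): an optional index segment between the
+// name and the field controls Multi and Index.
+//
+//    NewResourceVariable("aws_instance.web.id")   // Multi=false, Field="id"
+//    NewResourceVariable("aws_instance.web.0.id") // Multi=true, Index=0, Field="id"
+//    NewResourceVariable("aws_instance.web.*.id") // Multi=true, Index=-1, Field="id"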
+
+func (v *ResourceVariable) ResourceId() string {
+ switch v.Mode {
+ case ManagedResourceMode:
+ return fmt.Sprintf("%s.%s", v.Type, v.Name)
+ case DataResourceMode:
+ return fmt.Sprintf("data.%s.%s", v.Type, v.Name)
+ default:
+ panic(fmt.Errorf("unknown resource mode %s", v.Mode))
+ }
+}
+
+func (v *ResourceVariable) FullKey() string {
+ return v.key
+}
+
+func NewSelfVariable(key string) (*SelfVariable, error) {
+ field := key[len("self."):]
+
+ return &SelfVariable{
+ Field: field,
+
+ key: key,
+ }, nil
+}
+
+func (v *SelfVariable) FullKey() string {
+ return v.key
+}
+
+func (v *SelfVariable) GoString() string {
+ return fmt.Sprintf("*%#v", *v)
+}
+
+func NewSimpleVariable(key string) (*SimpleVariable, error) {
+ return &SimpleVariable{key}, nil
+}
+
+func (v *SimpleVariable) FullKey() string {
+ return v.Key
+}
+
+func (v *SimpleVariable) GoString() string {
+ return fmt.Sprintf("*%#v", *v)
+}
+
+func NewTerraformVariable(key string) (*TerraformVariable, error) {
+ field := key[len("terraform."):]
+ return &TerraformVariable{
+ Field: field,
+ key: key,
+ }, nil
+}
+
+func (v *TerraformVariable) FullKey() string {
+ return v.key
+}
+
+func (v *TerraformVariable) GoString() string {
+ return fmt.Sprintf("*%#v", *v)
+}
+
+func NewUserVariable(key string) (*UserVariable, error) {
+ name := key[len("var."):]
+ elem := ""
+ if idx := strings.Index(name, "."); idx > -1 {
+ elem = name[idx+1:]
+ name = name[:idx]
+ }
+
+ if len(elem) > 0 {
+ return nil, fmt.Errorf("Invalid dot index found: 'var.%s.%s'. Values in maps and lists can be referenced using square bracket indexing, like: 'var.mymap[\"key\"]' or 'var.mylist[1]'.", name, elem)
+ }
+
+ return &UserVariable{
+ key: key,
+
+ Name: name,
+ Elem: elem,
+ }, nil
+}
+
+func (v *UserVariable) FullKey() string {
+ return v.key
+}
+
+func (v *UserVariable) GoString() string {
+ return fmt.Sprintf("*%#v", *v)
+}
+
+// DetectVariables takes an AST root and returns all the interpolated
+// variables that are detected in the AST tree.
+func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) {
+ var result []InterpolatedVariable
+ var resultErr error
+
+ // Visitor callback
+ fn := func(n ast.Node) ast.Node {
+ if resultErr != nil {
+ return n
+ }
+
+ switch vn := n.(type) {
+ case *ast.VariableAccess:
+ v, err := NewInterpolatedVariable(vn.Name)
+ if err != nil {
+ resultErr = err
+ return n
+ }
+ result = append(result, v)
+ case *ast.Index:
+ if va, ok := vn.Target.(*ast.VariableAccess); ok {
+ v, err := NewInterpolatedVariable(va.Name)
+ if err != nil {
+ resultErr = err
+ return n
+ }
+ result = append(result, v)
+ }
+ if va, ok := vn.Key.(*ast.VariableAccess); ok {
+ v, err := NewInterpolatedVariable(va.Name)
+ if err != nil {
+ resultErr = err
+ return n
+ }
+ result = append(result, v)
+ }
+ default:
+ return n
+ }
+
+ return n
+ }
+
+ // Visitor pattern
+ root.Accept(fn)
+
+ if resultErr != nil {
+ return nil, resultErr
+ }
+
+ return result, nil
+}
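+
+// Illustrative usage (editor's sketch; assumes importing
+// github.com/hashicorp/hil): parse an interpolation with hil, then collect
+// the variables it references.
+//
+//    tree, _ := hil.Parse("${var.env}-${aws_instance.web.id}")
+//    vars, _ := DetectVariables(tree)
+//    // vars holds a *UserVariable for "var.env" and a
+//    // *ResourceVariable for "aws_instance.web.id"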
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
new file mode 100644
index 00000000..b7933471
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
@@ -0,0 +1,1346 @@
+package config
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "math"
+ "net"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/apparentlymart/go-cidr/cidr"
+ "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/hil"
+ "github.com/hashicorp/hil/ast"
+ "github.com/mitchellh/go-homedir"
+)
+
+// stringSliceToVariableValue converts a string slice into the value
+// required to be returned from interpolation functions which return
+// TypeList.
+func stringSliceToVariableValue(values []string) []ast.Variable {
+ output := make([]ast.Variable, len(values))
+ for index, value := range values {
+ output[index] = ast.Variable{
+ Type: ast.TypeString,
+ Value: value,
+ }
+ }
+ return output
+}
+
+func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) {
+ output := make([]string, len(values))
+ for index, value := range values {
+ if value.Type != ast.TypeString {
+ return []string{}, fmt.Errorf("list has non-string element (%T)", value.Type.String())
+ }
+ output[index] = value.Value.(string)
+ }
+ return output, nil
+}
+
+// Funcs is the mapping of built-in functions for configuration.
+func Funcs() map[string]ast.Function {
+ return map[string]ast.Function{
+ "basename": interpolationFuncBasename(),
+ "base64decode": interpolationFuncBase64Decode(),
+ "base64encode": interpolationFuncBase64Encode(),
+ "base64sha256": interpolationFuncBase64Sha256(),
+ "ceil": interpolationFuncCeil(),
+ "chomp": interpolationFuncChomp(),
+ "cidrhost": interpolationFuncCidrHost(),
+ "cidrnetmask": interpolationFuncCidrNetmask(),
+ "cidrsubnet": interpolationFuncCidrSubnet(),
+ "coalesce": interpolationFuncCoalesce(),
+ "coalescelist": interpolationFuncCoalesceList(),
+ "compact": interpolationFuncCompact(),
+ "concat": interpolationFuncConcat(),
+ "dirname": interpolationFuncDirname(),
+ "distinct": interpolationFuncDistinct(),
+ "element": interpolationFuncElement(),
+ "file": interpolationFuncFile(),
+ "matchkeys": interpolationFuncMatchKeys(),
+ "floor": interpolationFuncFloor(),
+ "format": interpolationFuncFormat(),
+ "formatlist": interpolationFuncFormatList(),
+ "index": interpolationFuncIndex(),
+ "join": interpolationFuncJoin(),
+ "jsonencode": interpolationFuncJSONEncode(),
+ "length": interpolationFuncLength(),
+ "list": interpolationFuncList(),
+ "lower": interpolationFuncLower(),
+ "map": interpolationFuncMap(),
+ "max": interpolationFuncMax(),
+ "md5": interpolationFuncMd5(),
+ "merge": interpolationFuncMerge(),
+ "min": interpolationFuncMin(),
+ "pathexpand": interpolationFuncPathExpand(),
+ "uuid": interpolationFuncUUID(),
+ "replace": interpolationFuncReplace(),
+ "sha1": interpolationFuncSha1(),
+ "sha256": interpolationFuncSha256(),
+ "signum": interpolationFuncSignum(),
+ "slice": interpolationFuncSlice(),
+ "sort": interpolationFuncSort(),
+ "split": interpolationFuncSplit(),
+ "substr": interpolationFuncSubstr(),
+ "timestamp": interpolationFuncTimestamp(),
+ "title": interpolationFuncTitle(),
+ "trimspace": interpolationFuncTrimSpace(),
+ "upper": interpolationFuncUpper(),
+ "zipmap": interpolationFuncZipMap(),
+ }
+}
+
+// interpolationFuncList creates a list from the parameters passed
+// to it.
+func interpolationFuncList() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{},
+ ReturnType: ast.TypeList,
+ Variadic: true,
+ VariadicType: ast.TypeAny,
+ Callback: func(args []interface{}) (interface{}, error) {
+ var outputList []ast.Variable
+
+ for i, val := range args {
+ switch v := val.(type) {
+ case string:
+ outputList = append(outputList, ast.Variable{Type: ast.TypeString, Value: v})
+ case []ast.Variable:
+ outputList = append(outputList, ast.Variable{Type: ast.TypeList, Value: v})
+ case map[string]ast.Variable:
+ outputList = append(outputList, ast.Variable{Type: ast.TypeMap, Value: v})
+ default:
+ return nil, fmt.Errorf("unexpected type %T for argument %d in list", v, i)
+ }
+ }
+
+ // we don't support heterogeneous types, so make sure all types match the first
+ if len(outputList) > 0 {
+ firstType := outputList[0].Type
+ for i, v := range outputList[1:] {
+ if v.Type != firstType {
+ return nil, fmt.Errorf("unexpected type %s for argument %d in list", v.Type, i+1)
+ }
+ }
+ }
+
+ return outputList, nil
+ },
+ }
+}
+
+// interpolationFuncMap creates a map from the parameters passed
+// to it.
+func interpolationFuncMap() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{},
+ ReturnType: ast.TypeMap,
+ Variadic: true,
+ VariadicType: ast.TypeAny,
+ Callback: func(args []interface{}) (interface{}, error) {
+ outputMap := make(map[string]ast.Variable)
+
+ if len(args)%2 != 0 {
+ return nil, fmt.Errorf("requires an even number of arguments, got %d", len(args))
+ }
+
+ var firstType *ast.Type
+ for i := 0; i < len(args); i += 2 {
+ key, ok := args[i].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument %d represents a key, so it must be a string", i+1)
+ }
+ val := args[i+1]
+ variable, err := hil.InterfaceToVariable(val)
+ if err != nil {
+ return nil, err
+ }
+ // Enforce map type homogeneity
+ if firstType == nil {
+ firstType = &variable.Type
+ } else if variable.Type != *firstType {
+ return nil, fmt.Errorf("all map values must have the same type, got %s then %s", firstType.Printable(), variable.Type.Printable())
+ }
+ // Check for duplicate keys
+ if _, ok := outputMap[key]; ok {
+ return nil, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key)
+ }
+ outputMap[key] = variable
+ }
+
+ return outputMap, nil
+ },
+ }
+}
+
+// interpolationFuncCompact strips a list of multi-variable values
+// (e.g. as returned by "split") of any empty strings.
+func interpolationFuncCompact() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList},
+ ReturnType: ast.TypeList,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ inputList := args[0].([]ast.Variable)
+
+ var outputList []string
+ for _, val := range inputList {
+ strVal, ok := val.Value.(string)
+ if !ok {
+ return nil, fmt.Errorf(
+ "compact() may only be used with flat lists, this list contains elements of %s",
+ val.Type.Printable())
+ }
+ if strVal == "" {
+ continue
+ }
+
+ outputList = append(outputList, strVal)
+ }
+ return stringSliceToVariableValue(outputList), nil
+ },
+ }
+}
+
+// interpolationFuncCidrHost implements the "cidrhost" function that
+// fills in the host part of a CIDR range address to create a single
+// host address
+func interpolationFuncCidrHost() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{
+ ast.TypeString, // starting CIDR mask
+ ast.TypeInt, // host number to insert
+ },
+ ReturnType: ast.TypeString,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ hostNum := args[1].(int)
+ _, network, err := net.ParseCIDR(args[0].(string))
+ if err != nil {
+ return nil, fmt.Errorf("invalid CIDR expression: %s", err)
+ }
+
+ ip, err := cidr.Host(network, hostNum)
+ if err != nil {
+ return nil, err
+ }
+
+ return ip.String(), nil
+ },
+ }
+}
+
+// interpolationFuncCidrNetmask implements the "cidrnetmask" function
+// that returns the subnet mask in IP address notation.
+func interpolationFuncCidrNetmask() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{
+ ast.TypeString, // CIDR mask
+ },
+ ReturnType: ast.TypeString,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ _, network, err := net.ParseCIDR(args[0].(string))
+ if err != nil {
+ return nil, fmt.Errorf("invalid CIDR expression: %s", err)
+ }
+
+ return net.IP(network.Mask).String(), nil
+ },
+ }
+}
+
+// interpolationFuncCidrSubnet implements the "cidrsubnet" function that
+// adds an additional subnet of the given length onto an existing
+// IP block expressed in CIDR notation.
+func interpolationFuncCidrSubnet() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{
+ ast.TypeString, // starting CIDR mask
+ ast.TypeInt, // number of bits to extend the prefix
+ ast.TypeInt, // network number to append to the prefix
+ },
+ ReturnType: ast.TypeString,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ extraBits := args[1].(int)
+ subnetNum := args[2].(int)
+ _, network, err := net.ParseCIDR(args[0].(string))
+ if err != nil {
+ return nil, fmt.Errorf("invalid CIDR expression: %s", err)
+ }
+
+ // For portability with 32-bit systems where the subnet number
+ // will be a 32-bit int, we only allow extension of 32 bits in
+ // one call even if we're running on a 64-bit machine.
+ // (Of course, this is significant only for IPv6.)
+ if extraBits > 32 {
+ return nil, fmt.Errorf("may not extend prefix by more than 32 bits")
+ }
+
+ newNetwork, err := cidr.Subnet(network, extraBits, subnetNum)
+ if err != nil {
+ return nil, err
+ }
+
+ return newNetwork.String(), nil
+ },
+ }
+}
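+
+// Worked examples for the three cidr* functions above (illustrative; results
+// follow from the go-cidr library):
+//
+//    cidrhost("10.0.0.0/8", 2)      => "10.0.0.2"
+//    cidrnetmask("10.0.0.0/8")      => "255.0.0.0"
+//    cidrsubnet("10.0.0.0/8", 8, 2) => "10.2.0.0/16"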
+
+// interpolationFuncCoalesce implements the "coalesce" function that
+// returns the first non null / empty string from the provided input
+func interpolationFuncCoalesce() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Variadic: true,
+ VariadicType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ if len(args) < 2 {
+ return nil, fmt.Errorf("must provide at least two arguments")
+ }
+ for _, arg := range args {
+ argument := arg.(string)
+
+ if argument != "" {
+ return argument, nil
+ }
+ }
+ return "", nil
+ },
+ }
+}
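+
+// For example (illustrative): coalesce("", "", "b") returns "b", and
+// coalesce("", "") returns the empty string.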
+
+// interpolationFuncCoalesceList implements the "coalescelist" function that
+// returns the first non empty list from the provided input
+func interpolationFuncCoalesceList() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList},
+ ReturnType: ast.TypeList,
+ Variadic: true,
+ VariadicType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ if len(args) < 2 {
+ return nil, fmt.Errorf("must provide at least two arguments")
+ }
+ for _, arg := range args {
+ argument := arg.([]ast.Variable)
+
+ if len(argument) > 0 {
+ return argument, nil
+ }
+ }
+ return make([]ast.Variable, 0), nil
+ },
+ }
+}
+
+// interpolationFuncConcat implements the "concat" function that concatenates
+// multiple lists.
+func interpolationFuncConcat() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList},
+ ReturnType: ast.TypeList,
+ Variadic: true,
+ VariadicType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ var outputList []ast.Variable
+
+ for _, arg := range args {
+ for _, v := range arg.([]ast.Variable) {
+ switch v.Type {
+ case ast.TypeString:
+ outputList = append(outputList, v)
+ case ast.TypeList:
+ outputList = append(outputList, v)
+ case ast.TypeMap:
+ outputList = append(outputList, v)
+ default:
+ return nil, fmt.Errorf("concat() does not support lists of %s", v.Type.Printable())
+ }
+ }
+ }
+
+ // we don't support heterogeneous types, so make sure all types match the first
+ if len(outputList) > 0 {
+ firstType := outputList[0].Type
+ for _, v := range outputList[1:] {
+ if v.Type != firstType {
+ return nil, fmt.Errorf("unexpected %s in list of %s", v.Type.Printable(), firstType.Printable())
+ }
+ }
+ }
+
+ return outputList, nil
+ },
+ }
+}
+
+// interpolationFuncFile implements the "file" function that allows
+// loading contents from a file.
+func interpolationFuncFile() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ path, err := homedir.Expand(args[0].(string))
+ if err != nil {
+ return "", err
+ }
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return "", err
+ }
+
+ return string(data), nil
+ },
+ }
+}
+
+// interpolationFuncFormat implements the "format" function that does
+// string formatting.
+func interpolationFuncFormat() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ Variadic: true,
+ VariadicType: ast.TypeAny,
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ format := args[0].(string)
+ return fmt.Sprintf(format, args[1:]...), nil
+ },
+ }
+}
+
+// interpolationFuncMax returns the maximum of the numeric arguments
+func interpolationFuncMax() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeFloat},
+ ReturnType: ast.TypeFloat,
+ Variadic: true,
+ VariadicType: ast.TypeFloat,
+ Callback: func(args []interface{}) (interface{}, error) {
+ max := args[0].(float64)
+
+ for i := 1; i < len(args); i++ {
+ max = math.Max(max, args[i].(float64))
+ }
+
+ return max, nil
+ },
+ }
+}
+
+// interpolationFuncMin returns the minimum of the numeric arguments
+func interpolationFuncMin() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeFloat},
+ ReturnType: ast.TypeFloat,
+ Variadic: true,
+ VariadicType: ast.TypeFloat,
+ Callback: func(args []interface{}) (interface{}, error) {
+ min := args[0].(float64)
+
+ for i := 1; i < len(args); i++ {
+ min = math.Min(min, args[i].(float64))
+ }
+
+ return min, nil
+ },
+ }
+}
+
+// interpolationFuncPathExpand will expand any `~`'s found with the full file path
+func interpolationFuncPathExpand() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return homedir.Expand(args[0].(string))
+ },
+ }
+}
+
+// interpolationFuncCeil returns the least integer value greater than or equal to the argument
+func interpolationFuncCeil() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeFloat},
+ ReturnType: ast.TypeInt,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return int(math.Ceil(args[0].(float64))), nil
+ },
+ }
+}
+
+// interpolationFuncChomp removes trailing newlines from the given string
+func interpolationFuncChomp() ast.Function {
+ newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return newlines.ReplaceAllString(args[0].(string), ""), nil
+ },
+ }
+}
+
+// interpolationFuncFloor returns the greatest integer value less than or equal to the argument
+func interpolationFuncFloor() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeFloat},
+ ReturnType: ast.TypeInt,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return int(math.Floor(args[0].(float64))), nil
+ },
+ }
+}
+
+func interpolationFuncZipMap() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{
+ ast.TypeList, // Keys
+ ast.TypeList, // Values
+ },
+ ReturnType: ast.TypeMap,
+ Callback: func(args []interface{}) (interface{}, error) {
+ keys := args[0].([]ast.Variable)
+ values := args[1].([]ast.Variable)
+
+ if len(keys) != len(values) {
+ return nil, fmt.Errorf("count of keys (%d) does not match count of values (%d)",
+ len(keys), len(values))
+ }
+
+ for i, val := range keys {
+ if val.Type != ast.TypeString {
+ return nil, fmt.Errorf("keys must be strings. value at position %d is %s",
+ i, val.Type.Printable())
+ }
+ }
+
+ result := map[string]ast.Variable{}
+ for i := 0; i < len(keys); i++ {
+ result[keys[i].Value.(string)] = values[i]
+ }
+
+ return result, nil
+ },
+ }
+}
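+
+// For example (illustrative): zipmap pairs each key with the value at the
+// same position.
+//
+//    zipmap(list("a", "b"), list("1", "2")) => { a = "1", b = "2" }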
+
+// interpolationFuncFormatList implements the "formatlist" function that does
+// string formatting on lists.
+func interpolationFuncFormatList() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeAny},
+ Variadic: true,
+ VariadicType: ast.TypeAny,
+ ReturnType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ // Make a copy of the variadic part of args
+ // to avoid modifying the original.
+ varargs := make([]interface{}, len(args)-1)
+ copy(varargs, args[1:])
+
+ // Verify we have some arguments
+ if len(varargs) == 0 {
+ return nil, fmt.Errorf("no arguments to formatlist")
+ }
+
+ // Convert arguments that are lists into slices.
+ // Confirm along the way that all lists have the same length (n).
+ var n int
+ listSeen := false
+ for i := 1; i < len(args); i++ {
+ s, ok := args[i].([]ast.Variable)
+ if !ok {
+ continue
+ }
+
+ // Mark that we've seen at least one list
+ listSeen = true
+
+ // Convert the ast.Variable to a slice of strings
+ parts, err := listVariableValueToStringSlice(s)
+ if err != nil {
+ return nil, err
+ }
+
+ // Store the converted slice so it can be indexed element-by-element below.
+ varargs[i-1] = parts
+
+ // Check length
+ if n == 0 {
+ // first list we've seen
+ n = len(parts)
+ continue
+ }
+ if n != len(parts) {
+ return nil, fmt.Errorf("format: mismatched list lengths: %d != %d", n, len(parts))
+ }
+ }
+
+ // If we didn't see a list, this is an error because we
+ // can't determine the return value length.
+ if !listSeen {
+ return nil, fmt.Errorf(
+ "formatlist requires at least one list argument")
+ }
+
+ // Do the formatting.
+ format := args[0].(string)
+
+ // Generate a list of formatted strings.
+ list := make([]string, n)
+ fmtargs := make([]interface{}, len(varargs))
+ for i := 0; i < n; i++ {
+ for j, arg := range varargs {
+ switch arg := arg.(type) {
+ default:
+ fmtargs[j] = arg
+ case []string:
+ fmtargs[j] = arg[i]
+ }
+ }
+ list[i] = fmt.Sprintf(format, fmtargs...)
+ }
+ return stringSliceToVariableValue(list), nil
+ },
+ }
+}
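+
+// For example (illustrative): scalar arguments repeat for every element,
+// while list arguments are indexed in step.
+//
+//    formatlist("%s:%s", list("a", "b"), "443") => ["a:443", "b:443"]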
+
+// interpolationFuncIndex implements the "index" function that allows one to
+// find the index of a specific element in a list
+func interpolationFuncIndex() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList, ast.TypeString},
+ ReturnType: ast.TypeInt,
+ Callback: func(args []interface{}) (interface{}, error) {
+ haystack := args[0].([]ast.Variable)
+ needle := args[1].(string)
+ for index, element := range haystack {
+ if needle == element.Value {
+ return index, nil
+ }
+ }
+ return nil, fmt.Errorf("Could not find '%s' in '%s'", needle, haystack)
+ },
+ }
+}
+
+// interpolationFuncBasename implements the "dirname" function.
+func interpolationFuncDirname() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return filepath.Dir(args[0].(string)), nil
+ },
+ }
+}
+
+// interpolationFuncDistinct implements the "distinct" function that
+// removes duplicate elements from a list.
+func interpolationFuncDistinct() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList},
+ ReturnType: ast.TypeList,
+ Variadic: true,
+ VariadicType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ var list []string
+
+ if len(args) != 1 {
+ return nil, fmt.Errorf("accepts only one argument.")
+ }
+
+ if argument, ok := args[0].([]ast.Variable); ok {
+ for _, element := range argument {
+ if element.Type != ast.TypeString {
+ return nil, fmt.Errorf(
+ "only works for flat lists, this list contains elements of %s",
+ element.Type.Printable())
+ }
+ list = appendIfMissing(list, element.Value.(string))
+ }
+ }
+
+ return stringSliceToVariableValue(list), nil
+ },
+ }
+}
+
+// appendIfMissing adds an element to a list if it does not already exist.
+func appendIfMissing(slice []string, element string) []string {
+ for _, ele := range slice {
+ if ele == element {
+ return slice
+ }
+ }
+ return append(slice, element)
+}
+
+// interpolationFuncMatchKeys implements the "matchkeys" function: for two
+// lists `keys` and `values` of equal length, it returns all elements from
+// `values` whose corresponding element in `keys` is in `searchset`.
+func interpolationFuncMatchKeys() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList, ast.TypeList, ast.TypeList},
+ ReturnType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ output := make([]ast.Variable, 0)
+
+ values, _ := args[0].([]ast.Variable)
+ keys, _ := args[1].([]ast.Variable)
+ searchset, _ := args[2].([]ast.Variable)
+
+ if len(keys) != len(values) {
+ return nil, fmt.Errorf("length of keys and values should be equal")
+ }
+
+ for i, key := range keys {
+ for _, search := range searchset {
+ if res, err := compareSimpleVariables(key, search); err != nil {
+ return nil, err
+ } else if res {
+ output = append(output, values[i])
+ break
+ }
+ }
+ }
+ // if searchset is empty, then output is an empty list as well.
+ // if we haven't matched any key, then output is an empty list.
+ return output, nil
+ },
+ }
+}
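+
+// For example (illustrative): keep only the values whose corresponding key
+// appears in the search set.
+//
+//    matchkeys(list("i-123", "i-456"), list("us-west", "us-east"), list("us-east"))
+//    // => ["i-456"]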
+
+// compareSimpleVariables compares two variables of the same simple type;
+// complex types such as TypeList or TypeMap are not supported.
+func compareSimpleVariables(a, b ast.Variable) (bool, error) {
+ if a.Type != b.Type {
+ return false, fmt.Errorf(
+ "won't compare items of different types %s and %s",
+ a.Type.Printable(), b.Type.Printable())
+ }
+ switch a.Type {
+ case ast.TypeString:
+ return a.Value.(string) == b.Value.(string), nil
+ default:
+ return false, fmt.Errorf(
+ "can't compare items of type %s",
+ a.Type.Printable())
+ }
+}
+
+// interpolationFuncJoin implements the "join" function that allows
+// multi-variable values to be joined by some character.
+func interpolationFuncJoin() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ Variadic: true,
+ VariadicType: ast.TypeList,
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ var list []string
+
+ if len(args) < 2 {
+ return nil, fmt.Errorf("not enough arguments to join()")
+ }
+
+ for _, arg := range args[1:] {
+ for _, part := range arg.([]ast.Variable) {
+ if part.Type != ast.TypeString {
+ return nil, fmt.Errorf(
+ "only works on flat lists, this list contains elements of %s",
+ part.Type.Printable())
+ }
+ list = append(list, part.Value.(string))
+ }
+ }
+
+ return strings.Join(list, args[0].(string)), nil
+ },
+ }
+}
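+
+// Illustrative sketch, not part of the upstream file: join concatenates the
+// elements of one or more lists with the given separator, e.g.
+//
+//   join(",", list("a", "b", "c"))   yields   "a,b,c"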
+
+// interpolationFuncJSONEncode implements the "jsonencode" function that encodes
+// a string, list, or map as its JSON representation. For now, values in the
+// list or map may only be strings.
+func interpolationFuncJSONEncode() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeAny},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ var toEncode interface{}
+
+ switch typedArg := args[0].(type) {
+ case string:
+ toEncode = typedArg
+
+ case []ast.Variable:
+ // We preallocate the list here. Note that it's important that in
+ // the length 0 case, we have an empty list rather than nil, as
+ // they encode differently.
+ // XXX It would be nice to support arbitrarily nested data here. Is
+ // there an inverse of hil.InterfaceToVariable?
+ strings := make([]string, len(typedArg))
+
+ for i, v := range typedArg {
+ if v.Type != ast.TypeString {
+ return "", fmt.Errorf("list elements must be strings")
+ }
+ strings[i] = v.Value.(string)
+ }
+ toEncode = strings
+
+ case map[string]ast.Variable:
+ // XXX It would be nice to support arbitrarily nested data here. Is
+ // there an inverse of hil.InterfaceToVariable?
+ stringMap := make(map[string]string)
+ for k, v := range typedArg {
+ if v.Type != ast.TypeString {
+ return "", fmt.Errorf("map values must be strings")
+ }
+ stringMap[k] = v.Value.(string)
+ }
+ toEncode = stringMap
+
+ default:
+ return "", fmt.Errorf("unknown type for JSON encoding: %T", args[0])
+ }
+
+ jEnc, err := json.Marshal(toEncode)
+ if err != nil {
+ return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode)
+ }
+ return string(jEnc), nil
+ },
+ }
+}
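+
+// Illustrative sketch, not part of the upstream file: assuming the map()
+// interpolation function is also available, jsonencode emits the JSON
+// representation of a string, flat list, or flat map, e.g.
+//
+//   jsonencode(map("key", "value"))   yields   {"key":"value"}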
+
+// interpolationFuncReplace implements the "replace" function that does
+// string replacement.
+func interpolationFuncReplace() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString, ast.TypeString, ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ search := args[1].(string)
+ replace := args[2].(string)
+
+ // We search/replace using a regexp if the string is surrounded
+ // in forward slashes.
+ if len(search) > 1 && search[0] == '/' && search[len(search)-1] == '/' {
+ re, err := regexp.Compile(search[1 : len(search)-1])
+ if err != nil {
+ return nil, err
+ }
+
+ return re.ReplaceAllString(s, replace), nil
+ }
+
+ return strings.Replace(s, search, replace, -1), nil
+ },
+ }
+}
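+
+// Illustrative sketch, not part of the upstream file: a plain search string
+// is replaced literally, while a /.../-delimited search string is compiled
+// as a regular expression, e.g.
+//
+//   replace("hello world", "world", "there")   yields   "hello there"
+//   replace("hello world", "/w.*d/", "you")    yields   "hello you"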
+
+func interpolationFuncLength() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeAny},
+ ReturnType: ast.TypeInt,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ subject := args[0]
+
+ switch typedSubject := subject.(type) {
+ case string:
+ return len(typedSubject), nil
+ case []ast.Variable:
+ return len(typedSubject), nil
+ case map[string]ast.Variable:
+ return len(typedSubject), nil
+ }
+
+ return 0, fmt.Errorf("arguments to length() must be a string, list, or map")
+ },
+ }
+}
+
+func interpolationFuncSignum() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt},
+ ReturnType: ast.TypeInt,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ num := args[0].(int)
+ switch {
+ case num < 0:
+ return -1, nil
+ case num > 0:
+ return +1, nil
+ default:
+ return 0, nil
+ }
+ },
+ }
+}
+
+// interpolationFuncSlice returns a portion of the input list between from, inclusive and to, exclusive.
+func interpolationFuncSlice() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{
+ ast.TypeList, // inputList
+ ast.TypeInt, // from
+ ast.TypeInt, // to
+ },
+ ReturnType: ast.TypeList,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ inputList := args[0].([]ast.Variable)
+ from := args[1].(int)
+ to := args[2].(int)
+
+ if from < 0 {
+ return nil, fmt.Errorf("from index must be >= 0")
+ }
+ if to > len(inputList) {
+ return nil, fmt.Errorf("to index must be <= length of the input list")
+ }
+ if from > to {
+ return nil, fmt.Errorf("from index must be <= to index")
+ }
+
+ var outputList []ast.Variable
+ for i, val := range inputList {
+ if i >= from && i < to {
+ outputList = append(outputList, val)
+ }
+ }
+ return outputList, nil
+ },
+ }
+}
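+
+// Illustrative sketch, not part of the upstream file: slice uses a half-open
+// [from, to) range, matching Go's slicing rules, e.g.
+//
+//   slice(list("a", "b", "c", "d"), 1, 3)   yields   ["b", "c"]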
+
+// interpolationFuncSort sorts a list of strings lexicographically
+func interpolationFuncSort() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList},
+ ReturnType: ast.TypeList,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ inputList := args[0].([]ast.Variable)
+
+ // Ensure that all the list members are strings and
+ // create a string slice from them
+ members := make([]string, len(inputList))
+ for i, val := range inputList {
+ if val.Type != ast.TypeString {
+ return nil, fmt.Errorf(
+ "sort() may only be used with lists of strings - %s at index %d",
+ val.Type.String(), i)
+ }
+
+ members[i] = val.Value.(string)
+ }
+
+ sort.Strings(members)
+ return stringSliceToVariableValue(members), nil
+ },
+ }
+}
+
+// interpolationFuncSplit implements the "split" function that allows
+// strings to be split into multi-variable values
+func interpolationFuncSplit() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString, ast.TypeString},
+ ReturnType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ sep := args[0].(string)
+ s := args[1].(string)
+ elements := strings.Split(s, sep)
+ return stringSliceToVariableValue(elements), nil
+ },
+ }
+}
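+
+// Illustrative sketch, not part of the upstream file: split is the inverse
+// of join, e.g.
+//
+//   split(",", "a,b,c")   yields   ["a", "b", "c"]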
+
+// interpolationFuncLookup implements the "lookup" function that allows
+// dynamic lookups of map types within a Terraform configuration.
+func interpolationFuncLookup(vs map[string]ast.Variable) ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeMap, ast.TypeString},
+ ReturnType: ast.TypeString,
+ Variadic: true,
+ VariadicType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ defaultValue := ""
+ defaultValueSet := false
+ if len(args) > 2 {
+ defaultValue = args[2].(string)
+ defaultValueSet = true
+ }
+ if len(args) > 3 {
+ return "", fmt.Errorf("lookup() takes no more than three arguments")
+ }
+ index := args[1].(string)
+ mapVar := args[0].(map[string]ast.Variable)
+
+ v, ok := mapVar[index]
+ if !ok {
+ if defaultValueSet {
+ return defaultValue, nil
+ } else {
+ return "", fmt.Errorf(
+ "lookup failed to find '%s'",
+ args[1].(string))
+ }
+ }
+ if v.Type != ast.TypeString {
+ return nil, fmt.Errorf(
+ "lookup() may only be used with flat maps, this map contains elements of %s",
+ v.Type.Printable())
+ }
+
+ return v.Value.(string), nil
+ },
+ }
+}
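+
+// Illustrative sketch, not part of the upstream file: the optional third
+// argument supplies a default that is returned when the key is absent, e.g.
+//
+//   lookup(map("us-east-1", "ami-123"), "eu-west-1", "ami-default")
+//
+// yields "ami-default" instead of an error.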
+
+// interpolationFuncElement implements the "element" function that allows
+// a specific index to be looked up in a multi-variable value. Note that this will
+// wrap if the index is larger than the number of elements in the multi-variable value.
+func interpolationFuncElement() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList, ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ list := args[0].([]ast.Variable)
+ if len(list) == 0 {
+ return nil, fmt.Errorf("element() may not be used with an empty list")
+ }
+
+ index, err := strconv.Atoi(args[1].(string))
+ if err != nil || index < 0 {
+ return "", fmt.Errorf(
+ "invalid number for index, got %s", args[1])
+ }
+
+ resolvedIndex := index % len(list)
+
+ v := list[resolvedIndex]
+ if v.Type != ast.TypeString {
+ return nil, fmt.Errorf(
+ "element() may only be used with flat lists, this list contains elements of %s",
+ v.Type.Printable())
+ }
+ return v.Value, nil
+ },
+ }
+}
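+
+// Illustrative sketch, not part of the upstream file: because of the modulo
+// above, indexes wrap around the list, e.g. with a three-element list
+//
+//   element(list("a", "b", "c"), 4)   yields   "b"   (4 % 3 == 1)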
+
+// interpolationFuncKeys implements the "keys" function that yields a list of
+// keys of map types within a Terraform configuration.
+func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeMap},
+ ReturnType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ mapVar := args[0].(map[string]ast.Variable)
+ keys := make([]string, 0)
+
+ for k := range mapVar {
+ keys = append(keys, k)
+ }
+
+ sort.Strings(keys)
+
+ // Keys are guaranteed to be strings
+ return stringSliceToVariableValue(keys), nil
+ },
+ }
+}
+
+// interpolationFuncValues implements the "values" function that yields a list of
+// values of map types within a Terraform configuration.
+func interpolationFuncValues(vs map[string]ast.Variable) ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeMap},
+ ReturnType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ mapVar := args[0].(map[string]ast.Variable)
+ keys := make([]string, 0)
+
+ for k := range mapVar {
+ keys = append(keys, k)
+ }
+
+ sort.Strings(keys)
+
+ values := make([]string, len(keys))
+ for index, key := range keys {
+ if value, ok := mapVar[key].Value.(string); ok {
+ values[index] = value
+ } else {
+ return "", fmt.Errorf("values(): %q has element with bad type %s",
+ key, mapVar[key].Type)
+ }
+ }
+
+ variable, err := hil.InterfaceToVariable(values)
+ if err != nil {
+ return nil, err
+ }
+
+ return variable.Value, nil
+ },
+ }
+}
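+
+// Illustrative note, not part of the upstream file: keys() and values() both
+// sort by key, so their results correspond position by position; for
+// map("b", "2", "a", "1"), keys yields ["a", "b"] and values yields ["1", "2"].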
+
+// interpolationFuncBasename implements the "basename" function.
+func interpolationFuncBasename() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return filepath.Base(args[0].(string)), nil
+ },
+ }
+}
+
+// interpolationFuncBase64Encode implements the "base64encode" function that
+// allows Base64 encoding.
+func interpolationFuncBase64Encode() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ return base64.StdEncoding.EncodeToString([]byte(s)), nil
+ },
+ }
+}
+
+// interpolationFuncBase64Decode implements the "base64decode" function that
+// allows Base64 decoding.
+func interpolationFuncBase64Decode() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ sDec, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ return "", fmt.Errorf("failed to decode base64 data '%s'", s)
+ }
+ return string(sDec), nil
+ },
+ }
+}
+
+// interpolationFuncLower implements the "lower" function that does
+// string lower casing.
+func interpolationFuncLower() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ toLower := args[0].(string)
+ return strings.ToLower(toLower), nil
+ },
+ }
+}
+
+func interpolationFuncMd5() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ h := md5.New()
+ h.Write([]byte(s))
+ hash := hex.EncodeToString(h.Sum(nil))
+ return hash, nil
+ },
+ }
+}
+
+func interpolationFuncMerge() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeMap},
+ ReturnType: ast.TypeMap,
+ Variadic: true,
+ VariadicType: ast.TypeMap,
+ Callback: func(args []interface{}) (interface{}, error) {
+ outputMap := make(map[string]ast.Variable)
+
+ for _, arg := range args {
+ for k, v := range arg.(map[string]ast.Variable) {
+ outputMap[k] = v
+ }
+ }
+
+ return outputMap, nil
+ },
+ }
+}
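+
+// Illustrative sketch, not part of the upstream file: maps are merged left to
+// right, so later maps win on key collisions, e.g.
+//
+//   merge(map("a", "1", "b", "2"), map("b", "3"))   yields   {a = "1", b = "3"}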
+
+// interpolationFuncUpper implements the "upper" function that does
+// string upper casing.
+func interpolationFuncUpper() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ toUpper := args[0].(string)
+ return strings.ToUpper(toUpper), nil
+ },
+ }
+}
+
+func interpolationFuncSha1() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ h := sha1.New()
+ h.Write([]byte(s))
+ hash := hex.EncodeToString(h.Sum(nil))
+ return hash, nil
+ },
+ }
+}
+
+// interpolationFuncSha256 implements the "sha256" function, returning the
+// hexadecimal representation of the SHA-256 sum of its argument.
+func interpolationFuncSha256() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ h := sha256.New()
+ h.Write([]byte(s))
+ hash := hex.EncodeToString(h.Sum(nil))
+ return hash, nil
+ },
+ }
+}
+
+func interpolationFuncTrimSpace() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ trimSpace := args[0].(string)
+ return strings.TrimSpace(trimSpace), nil
+ },
+ }
+}
+
+func interpolationFuncBase64Sha256() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ h := sha256.New()
+ h.Write([]byte(s))
+ shaSum := h.Sum(nil)
+ encoded := base64.StdEncoding.EncodeToString(shaSum[:])
+ return encoded, nil
+ },
+ }
+}
+
+func interpolationFuncUUID() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return uuid.GenerateUUID()
+ },
+ }
+}
+
+// interpolationFuncTimestamp implements the "timestamp" function that
+// returns the current UTC time in RFC 3339 format.
+func interpolationFuncTimestamp() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return time.Now().UTC().Format(time.RFC3339), nil
+ },
+ }
+}
+
+// interpolationFuncTitle implements the "title" function that returns a copy of the
+// string in which the first character of each word is capitalized.
+func interpolationFuncTitle() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ toTitle := args[0].(string)
+ return strings.Title(toTitle), nil
+ },
+ }
+}
+
+// interpolationFuncSubstr implements the "substr" function that extracts
+// a substring from a string, given an offset and a length.
+func interpolationFuncSubstr() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{
+ ast.TypeString, // input string
+ ast.TypeInt, // offset
+ ast.TypeInt, // length
+ },
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ str := args[0].(string)
+ offset := args[1].(int)
+ length := args[2].(int)
+
+ // Interpret a negative offset as being equivalent to a positive
+ // offset taken from the end of the string.
+ if offset < 0 {
+ offset += len(str)
+ }
+
+ // Interpret a length of `-1` as indicating that the substring
+ // should start at `offset` and continue until the end of the
+ // string. Any other negative length (other than `-1`) is invalid.
+ if length == -1 {
+ length = len(str)
+ } else if length >= 0 {
+ length += offset
+ } else {
+ return nil, fmt.Errorf("length should be a non-negative integer")
+ }
+
+ if offset > len(str) {
+ return nil, fmt.Errorf("offset cannot be larger than the length of the string")
+ }
+
+ if length > len(str) {
+ return nil, fmt.Errorf("'offset + length' cannot be larger than the length of the string")
+ }
+
+ return str[offset:length], nil
+ },
+ }
+}
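+
+// Illustrative sketch, not part of the upstream file: given the offset and
+// length rules above, substr behaves like Go string slicing, e.g.
+//
+//   substr("hello world", 0, 5)    yields   "hello"
+//   substr("hello world", -5, -1)  yields   "world"   (offset counts from the end)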
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
new file mode 100644
index 00000000..ead3d102
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
@@ -0,0 +1,283 @@
+package config
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/hashicorp/hil"
+ "github.com/hashicorp/hil/ast"
+ "github.com/mitchellh/reflectwalk"
+)
+
+// interpolationWalker implements interfaces for the reflectwalk package
+// (github.com/mitchellh/reflectwalk) that can be used to automatically
+// execute a callback for an interpolation.
+type interpolationWalker struct {
+ // F is the function to call for every interpolation. It can be nil.
+ //
+ // If Replace is true, then the return value of F will be used to
+ // replace the interpolation.
+ F interpolationWalkerFunc
+ Replace bool
+
+ // ContextF is an advanced version of F that also receives the
+ // location of where it is in the structure. This lets you do
+ // context-aware validation.
+ ContextF interpolationWalkerContextFunc
+
+ key []string
+ lastValue reflect.Value
+ loc reflectwalk.Location
+ cs []reflect.Value
+ csKey []reflect.Value
+ csData interface{}
+ sliceIndex []int
+ unknownKeys []string
+}
+
+// interpolationWalkerFunc is the callback called by interpolationWalk.
+// It is called with any interpolation found. It should return a value
+// to replace the interpolation with, along with any errors.
+//
+// If Replace is set to false in interpolationWalker, then the replace
+// value can be anything as it will have no effect.
+type interpolationWalkerFunc func(ast.Node) (interface{}, error)
+
+// interpolationWalkerContextFunc is called by interpolationWalk if
+// ContextF is set. This receives both the interpolation and the location
+// where the interpolation is.
+//
+// This callback can be used to validate the location of the interpolation
+// within the configuration.
+type interpolationWalkerContextFunc func(reflectwalk.Location, ast.Node)
+
+func (w *interpolationWalker) Enter(loc reflectwalk.Location) error {
+ w.loc = loc
+ return nil
+}
+
+func (w *interpolationWalker) Exit(loc reflectwalk.Location) error {
+ w.loc = reflectwalk.None
+
+ switch loc {
+ case reflectwalk.Map:
+ w.cs = w.cs[:len(w.cs)-1]
+ case reflectwalk.MapValue:
+ w.key = w.key[:len(w.key)-1]
+ w.csKey = w.csKey[:len(w.csKey)-1]
+ case reflectwalk.Slice:
+ // Split any values that need to be split
+ w.splitSlice()
+ w.cs = w.cs[:len(w.cs)-1]
+ case reflectwalk.SliceElem:
+ w.csKey = w.csKey[:len(w.csKey)-1]
+ w.sliceIndex = w.sliceIndex[:len(w.sliceIndex)-1]
+ }
+
+ return nil
+}
+
+func (w *interpolationWalker) Map(m reflect.Value) error {
+ w.cs = append(w.cs, m)
+ return nil
+}
+
+func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error {
+ w.csData = k
+ w.csKey = append(w.csKey, k)
+
+ if l := len(w.sliceIndex); l > 0 {
+ w.key = append(w.key, fmt.Sprintf("%d.%s", w.sliceIndex[l-1], k.String()))
+ } else {
+ w.key = append(w.key, k.String())
+ }
+
+ w.lastValue = v
+ return nil
+}
+
+func (w *interpolationWalker) Slice(s reflect.Value) error {
+ w.cs = append(w.cs, s)
+ return nil
+}
+
+func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error {
+ w.csKey = append(w.csKey, reflect.ValueOf(i))
+ w.sliceIndex = append(w.sliceIndex, i)
+ return nil
+}
+
+func (w *interpolationWalker) Primitive(v reflect.Value) error {
+ setV := v
+
+ // We only care about strings
+ if v.Kind() == reflect.Interface {
+ setV = v
+ v = v.Elem()
+ }
+ if v.Kind() != reflect.String {
+ return nil
+ }
+
+ astRoot, err := hil.Parse(v.String())
+ if err != nil {
+ return err
+ }
+
+ // If the AST we got is just a literal string value with the same
+ // value then we ignore it. We have to check if it's the same value
+ // because it is possible to input a string, get out a string, and
+ // have it be different. For example: "foo-$${bar}" turns into
+ // "foo-${bar}"
+ if n, ok := astRoot.(*ast.LiteralNode); ok {
+ if s, ok := n.Value.(string); ok && s == v.String() {
+ return nil
+ }
+ }
+
+ if w.ContextF != nil {
+ w.ContextF(w.loc, astRoot)
+ }
+
+ if w.F == nil {
+ return nil
+ }
+
+ replaceVal, err := w.F(astRoot)
+ if err != nil {
+ return fmt.Errorf(
+ "%s in:\n\n%s",
+ err, v.String())
+ }
+
+ if w.Replace {
+ // We need to determine if we need to remove this element
+ // if the result contains any "UnknownVariableValue" which is
+ // set if it is computed. This behavior is different if we're
+ // splitting (in a SliceElem) or not.
+ remove := false
+ if w.loc == reflectwalk.SliceElem {
+ switch typedReplaceVal := replaceVal.(type) {
+ case string:
+ if typedReplaceVal == UnknownVariableValue {
+ remove = true
+ }
+ case []interface{}:
+ if hasUnknownValue(typedReplaceVal) {
+ remove = true
+ }
+ }
+ } else if replaceVal == UnknownVariableValue {
+ remove = true
+ }
+
+ if remove {
+ w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, "."))
+ }
+
+ resultVal := reflect.ValueOf(replaceVal)
+ switch w.loc {
+ case reflectwalk.MapKey:
+ m := w.cs[len(w.cs)-1]
+
+ // Delete the old value
+ var zero reflect.Value
+ m.SetMapIndex(w.csData.(reflect.Value), zero)
+
+ // Set the new key with the existing value
+ m.SetMapIndex(resultVal, w.lastValue)
+
+ // Set the key to be the new key
+ w.csData = resultVal
+ case reflectwalk.MapValue:
+ // If we're in a map, then the only way to set a map value is
+ // to set it directly.
+ m := w.cs[len(w.cs)-1]
+ mk := w.csData.(reflect.Value)
+ m.SetMapIndex(mk, resultVal)
+ default:
+ // Otherwise, we should be addressable
+ setV.Set(resultVal)
+ }
+ }
+
+ return nil
+}
+
+func (w *interpolationWalker) replaceCurrent(v reflect.Value) {
+ // If we don't have at least 2 values on the stack, there is no enclosing
+ // map to replace into, and indexing below would panic.
+ if len(w.cs) < 2 {
+ return
+ }
+
+ c := w.cs[len(w.cs)-2]
+ switch c.Kind() {
+ case reflect.Map:
+ // Get the key and delete it
+ k := w.csKey[len(w.csKey)-1]
+ c.SetMapIndex(k, v)
+ }
+}
+
+func hasUnknownValue(variable []interface{}) bool {
+ for _, value := range variable {
+ if strVal, ok := value.(string); ok {
+ if strVal == UnknownVariableValue {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func (w *interpolationWalker) splitSlice() {
+ raw := w.cs[len(w.cs)-1]
+
+ var s []interface{}
+ switch v := raw.Interface().(type) {
+ case []interface{}:
+ s = v
+ case []map[string]interface{}:
+ return
+ }
+
+ split := false
+ for _, val := range s {
+ if varVal, ok := val.(ast.Variable); ok && varVal.Type == ast.TypeList {
+ split = true
+ }
+ if _, ok := val.([]interface{}); ok {
+ split = true
+ }
+ }
+
+ if !split {
+ return
+ }
+
+ result := make([]interface{}, 0)
+ for _, v := range s {
+ switch val := v.(type) {
+ case ast.Variable:
+ switch val.Type {
+ case ast.TypeList:
+ elements := val.Value.([]ast.Variable)
+ for _, element := range elements {
+ result = append(result, element.Value)
+ }
+ default:
+ result = append(result, val.Value)
+ }
+ case []interface{}:
+ for _, element := range val {
+ result = append(result, element)
+ }
+ default:
+ result = append(result, v)
+ }
+ }
+
+ w.replaceCurrent(reflect.ValueOf(result))
+}
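+
+// Illustrative sketch, not part of the upstream file: the walker is driven by
+// reflectwalk.Walk. A hypothetical caller that evaluates and replaces every
+// interpolation in a raw value might look like this (evalConfig is an assumed
+// *hil.EvalConfig):
+//
+//   w := &interpolationWalker{
+//       F: func(n ast.Node) (interface{}, error) {
+//           result, err := hil.Eval(n, evalConfig)
+//           if err != nil {
+//               return nil, err
+//           }
+//           return result.Value, nil
+//       },
+//       Replace: true,
+//   }
+//   err := reflectwalk.Walk(rawValue, w)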
diff --git a/vendor/github.com/hashicorp/terraform/config/lang.go b/vendor/github.com/hashicorp/terraform/config/lang.go
new file mode 100644
index 00000000..890d30be
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/lang.go
@@ -0,0 +1,11 @@
+package config
+
+import (
+ "github.com/hashicorp/hil/ast"
+)
+
+type noopNode struct{}
+
+func (n *noopNode) Accept(ast.Visitor) ast.Node { return n }
+func (n *noopNode) Pos() ast.Pos { return ast.Pos{} }
+func (n *noopNode) Type(ast.Scope) (ast.Type, error) { return ast.TypeString, nil }
diff --git a/vendor/github.com/hashicorp/terraform/config/loader.go b/vendor/github.com/hashicorp/terraform/config/loader.go
new file mode 100644
index 00000000..0bfa89c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/loader.go
@@ -0,0 +1,224 @@
+package config
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/hashicorp/hcl"
+)
+
+// ErrNoConfigsFound is the error returned by LoadDir if no
+// Terraform configuration files were found in the given directory.
+type ErrNoConfigsFound struct {
+ Dir string
+}
+
+func (e ErrNoConfigsFound) Error() string {
+ return fmt.Sprintf(
+ "No Terraform configuration files found in directory: %s",
+ e.Dir)
+}
+
+// LoadJSON loads a single Terraform configuration from a given JSON document.
+//
+// The document must be a complete Terraform configuration. This function will
+// NOT try to load any additional modules so only the given document is loaded.
+func LoadJSON(raw json.RawMessage) (*Config, error) {
+ obj, err := hcl.Parse(string(raw))
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing JSON document as HCL: %s", err)
+ }
+
+ // Start building the result
+ hclConfig := &hclConfigurable{
+ Root: obj,
+ }
+
+ return hclConfig.Config()
+}
+
+// LoadFile loads the Terraform configuration from a given file.
+//
+// The file can be in any format that Terraform recognizes, and may import
+// other files in any format that Terraform recognizes.
+func LoadFile(path string) (*Config, error) {
+ importTree, err := loadTree(path)
+ if err != nil {
+ return nil, err
+ }
+
+ configTree, err := importTree.ConfigTree()
+
+ // Close the importTree now so that we can clear resources as quickly
+ // as possible.
+ importTree.Close()
+
+ if err != nil {
+ return nil, err
+ }
+
+ return configTree.Flatten()
+}
+
+// LoadDir loads all the Terraform configuration files in a single
+// directory and appends them together.
+//
+// Special files known as "override files" can also be present, which
+// are merged into the loaded configuration. That is, the non-override
+// files are loaded first to create the configuration. Then, the overrides
+// are merged into the configuration to create the final configuration.
+//
+// Files are loaded in lexical order.
+func LoadDir(root string) (*Config, error) {
+ files, overrides, err := dirFiles(root)
+ if err != nil {
+ return nil, err
+ }
+ if len(files) == 0 {
+ return nil, &ErrNoConfigsFound{Dir: root}
+ }
+
+ // Determine the absolute path to the directory.
+ rootAbs, err := filepath.Abs(root)
+ if err != nil {
+ return nil, err
+ }
+
+ var result *Config
+
+ // Sort the files and overrides so we have a deterministic order
+ sort.Strings(files)
+ sort.Strings(overrides)
+
+ // Load all the regular files, append them to each other.
+ for _, f := range files {
+ c, err := LoadFile(f)
+ if err != nil {
+ return nil, err
+ }
+
+ if result != nil {
+ result, err = Append(result, c)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ result = c
+ }
+ }
+
+ // Load all the overrides, and merge them into the config
+ for _, f := range overrides {
+ c, err := LoadFile(f)
+ if err != nil {
+ return nil, err
+ }
+
+ result, err = Merge(result, c)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Mark the directory
+ result.Dir = rootAbs
+
+ return result, nil
+}
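+
+// Illustrative note, not part of the upstream file: given a directory holding
+// main.tf, extra.tf, and main_override.tf, LoadDir appends main.tf and
+// extra.tf in lexical order, then merges main_override.tf over the result.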
+
+// IsEmptyDir returns true if the directory given has no Terraform
+// configuration files.
+func IsEmptyDir(root string) (bool, error) {
+ if _, err := os.Stat(root); err != nil && os.IsNotExist(err) {
+ return true, nil
+ }
+
+ fs, os, err := dirFiles(root)
+ if err != nil {
+ return false, err
+ }
+
+ return len(fs) == 0 && len(os) == 0, nil
+}
+
+// ext returns the Terraform configuration extension of the given
+// path, or a blank string if the path has no recognized extension.
+func ext(path string) string {
+ if strings.HasSuffix(path, ".tf") {
+ return ".tf"
+ } else if strings.HasSuffix(path, ".tf.json") {
+ return ".tf.json"
+ } else {
+ return ""
+ }
+}
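+
+// Illustrative note, not part of the upstream file:
+//
+//   ext("main.tf")        yields   ".tf"
+//   ext("main.tf.json")   yields   ".tf.json"
+//   ext("README.md")      yields   ""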
+
+func dirFiles(dir string) ([]string, []string, error) {
+ f, err := os.Open(dir)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer f.Close()
+
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, nil, err
+ }
+ if !fi.IsDir() {
+ return nil, nil, fmt.Errorf(
+ "configuration path must be a directory: %s",
+ dir)
+ }
+
+ var files, overrides []string
+ err = nil
+ for err != io.EOF {
+ var fis []os.FileInfo
+ fis, err = f.Readdir(128)
+ if err != nil && err != io.EOF {
+ return nil, nil, err
+ }
+
+ for _, fi := range fis {
+ // Ignore directories
+ if fi.IsDir() {
+ continue
+ }
+
+ // Only care about files that are valid to load
+ name := fi.Name()
+ extValue := ext(name)
+ if extValue == "" || isIgnoredFile(name) {
+ continue
+ }
+
+ // Determine if we're dealing with an override
+ nameNoExt := name[:len(name)-len(extValue)]
+ override := nameNoExt == "override" ||
+ strings.HasSuffix(nameNoExt, "_override")
+
+ path := filepath.Join(dir, name)
+ if override {
+ overrides = append(overrides, path)
+ } else {
+ files = append(files, path)
+ }
+ }
+ }
+
+ return files, overrides, nil
+}
+
+// isIgnoredFile returns true or false depending on whether the
+// provided file name is a file that should be ignored.
+func isIgnoredFile(name string) bool {
+ return strings.HasPrefix(name, ".") || // Unix-like hidden files
+ strings.HasSuffix(name, "~") || // vim
+ strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go
new file mode 100644
index 00000000..a40ad5ba
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go
@@ -0,0 +1,1091 @@
+package config
+
+import (
+ "fmt"
+ "io/ioutil"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/hcl"
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/mitchellh/mapstructure"
+)
+
+// hclConfigurable is an implementation of configurable that knows
+// how to turn HCL configuration into a *Config object.
+type hclConfigurable struct {
+ File string
+ Root *ast.File
+}
+
+func (t *hclConfigurable) Config() (*Config, error) {
+ validKeys := map[string]struct{}{
+ "atlas": struct{}{},
+ "data": struct{}{},
+ "module": struct{}{},
+ "output": struct{}{},
+ "provider": struct{}{},
+ "resource": struct{}{},
+ "terraform": struct{}{},
+ "variable": struct{}{},
+ }
+
+ // Top-level item should be the object list
+ list, ok := t.Root.Node.(*ast.ObjectList)
+ if !ok {
+ return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
+ }
+
+ // Start building up the actual configuration.
+ config := new(Config)
+
+ // Terraform config
+ if o := list.Filter("terraform"); len(o.Items) > 0 {
+ var err error
+ config.Terraform, err = loadTerraformHcl(o)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Build the variables
+ if vars := list.Filter("variable"); len(vars.Items) > 0 {
+ var err error
+ config.Variables, err = loadVariablesHcl(vars)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Get Atlas configuration
+ if atlas := list.Filter("atlas"); len(atlas.Items) > 0 {
+ var err error
+ config.Atlas, err = loadAtlasHcl(atlas)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Build the modules
+ if modules := list.Filter("module"); len(modules.Items) > 0 {
+ var err error
+ config.Modules, err = loadModulesHcl(modules)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Build the provider configs
+ if providers := list.Filter("provider"); len(providers.Items) > 0 {
+ var err error
+ config.ProviderConfigs, err = loadProvidersHcl(providers)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Build the resources
+ {
+ var err error
+ managedResourceConfigs := list.Filter("resource")
+ dataResourceConfigs := list.Filter("data")
+
+ config.Resources = make(
+ []*Resource, 0,
+ len(managedResourceConfigs.Items)+len(dataResourceConfigs.Items),
+ )
+
+ managedResources, err := loadManagedResourcesHcl(managedResourceConfigs)
+ if err != nil {
+ return nil, err
+ }
+ dataResources, err := loadDataResourcesHcl(dataResourceConfigs)
+ if err != nil {
+ return nil, err
+ }
+
+ config.Resources = append(config.Resources, dataResources...)
+ config.Resources = append(config.Resources, managedResources...)
+ }
+
+ // Build the outputs
+ if outputs := list.Filter("output"); len(outputs.Items) > 0 {
+ var err error
+ config.Outputs, err = loadOutputsHcl(outputs)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Check for invalid keys
+ for _, item := range list.Items {
+ if len(item.Keys) == 0 {
+ // Not sure how this would happen, but let's avoid a panic
+ continue
+ }
+
+ k := item.Keys[0].Token.Value().(string)
+ if _, ok := validKeys[k]; ok {
+ continue
+ }
+
+ config.unknownKeys = append(config.unknownKeys, k)
+ }
+
+ return config, nil
+}
+
+// loadFileHcl is a fileLoaderFunc that knows how to read HCL
+// files and turn them into hclConfigurables.
+func loadFileHcl(root string) (configurable, []string, error) {
+ // Read the HCL file and prepare for parsing
+ d, err := ioutil.ReadFile(root)
+ if err != nil {
+ return nil, nil, fmt.Errorf(
+ "Error reading %s: %s", root, err)
+ }
+
+ // Parse it
+ hclRoot, err := hcl.Parse(string(d))
+ if err != nil {
+ return nil, nil, fmt.Errorf(
+ "Error parsing %s: %s", root, err)
+ }
+
+ // Start building the result
+ result := &hclConfigurable{
+ File: root,
+ Root: hclRoot,
+ }
+
+ // Dive in, find the imports. This is disabled for now since
+ // imports were removed prior to Terraform 0.1. The code remains
+ // here, commented out, for historical purposes.
+ /*
+ imports := obj.Get("import")
+ if imports == nil {
+ result.Object.Ref()
+ return result, nil, nil
+ }
+
+ if imports.Type() != libucl.ObjectTypeString {
+ imports.Close()
+
+ return nil, nil, fmt.Errorf(
+ "Error in %s: all 'import' declarations should be in the format\n"+
+ "`import \"foo\"` (Got type %s)",
+ root,
+ imports.Type())
+ }
+
+ // Gather all the import paths
+ importPaths := make([]string, 0, imports.Len())
+ iter := imports.Iterate(false)
+ for imp := iter.Next(); imp != nil; imp = iter.Next() {
+ path := imp.ToString()
+ if !filepath.IsAbs(path) {
+ // Relative paths are relative to the Terraform file itself
+ dir := filepath.Dir(root)
+ path = filepath.Join(dir, path)
+ }
+
+ importPaths = append(importPaths, path)
+ imp.Close()
+ }
+ iter.Close()
+ imports.Close()
+
+ result.Object.Ref()
+ */
+
+ return result, nil, nil
+}
+
+// Given a handle to a HCL object, this transforms it into the Terraform config
+func loadTerraformHcl(list *ast.ObjectList) (*Terraform, error) {
+ if len(list.Items) > 1 {
+ return nil, fmt.Errorf("only one 'terraform' block allowed per module")
+ }
+
+ // Get our one item
+ item := list.Items[0]
+
+ // This block should have an empty top level ObjectItem. If there are keys
+ // here, it's likely because we have a flattened JSON object, and we can
+ // lift this into a nested ObjectList to decode properly.
+ if len(item.Keys) > 0 {
+ item = &ast.ObjectItem{
+ Val: &ast.ObjectType{
+ List: &ast.ObjectList{
+ Items: []*ast.ObjectItem{item},
+ },
+ },
+ }
+ }
+
+ // We need the item value as an ObjectList
+ var listVal *ast.ObjectList
+ if ot, ok := item.Val.(*ast.ObjectType); ok {
+ listVal = ot.List
+ } else {
+ return nil, fmt.Errorf("terraform block: should be an object")
+ }
+
+ // NOTE: We purposely don't validate unknown HCL keys here so that
+ // we can potentially read _future_ Terraform version config (to
+ // still be able to validate the required version).
+ //
+ // We should still keep track of unknown keys to validate later, but
+ // HCL doesn't currently support that.
+
+ var config Terraform
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, fmt.Errorf(
+ "Error reading terraform config: %s",
+ err)
+ }
+
+ // If we have a backend block, then parse it out
+ if os := listVal.Filter("backend"); len(os.Items) > 0 {
+ var err error
+ config.Backend, err = loadTerraformBackendHcl(os)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading backend config for terraform block: %s",
+ err)
+ }
+ }
+
+ return &config, nil
+}
+
+// Loads the Backend configuration from an object list.
+func loadTerraformBackendHcl(list *ast.ObjectList) (*Backend, error) {
+ if len(list.Items) > 1 {
+ return nil, fmt.Errorf("only one 'backend' block allowed")
+ }
+
+ // Get our one item
+ item := list.Items[0]
+
+ // Verify the keys
+ if len(item.Keys) != 1 {
+ return nil, fmt.Errorf(
+ "position %s: 'backend' must be followed by exactly one string: a type",
+ item.Pos())
+ }
+
+ typ := item.Keys[0].Token.Value().(string)
+
+ // Decode the raw config
+ var config map[string]interface{}
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, fmt.Errorf(
+ "Error reading backend config: %s",
+ err)
+ }
+
+ rawConfig, err := NewRawConfig(config)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading backend config: %s",
+ err)
+ }
+
+ b := &Backend{
+ Type: typ,
+ RawConfig: rawConfig,
+ }
+ b.Hash = b.Rehash()
+
+ return b, nil
+}
+
+// Given a handle to a HCL object, this transforms it into the Atlas
+// configuration.
+func loadAtlasHcl(list *ast.ObjectList) (*AtlasConfig, error) {
+ if len(list.Items) > 1 {
+ return nil, fmt.Errorf("only one 'atlas' block allowed")
+ }
+
+ // Get our one item
+ item := list.Items[0]
+
+ var config AtlasConfig
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, fmt.Errorf(
+ "Error reading atlas config: %s",
+ err)
+ }
+
+ return &config, nil
+}
+
+// Given a handle to a HCL object, this recurses into the structure
+// and pulls out a list of modules.
+//
+// The resulting modules may not be unique, but each module
+// represents exactly one module definition in the HCL configuration.
+// We leave it up to another pass to merge them together.
+func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
+ list = list.Children()
+ if len(list.Items) == 0 {
+ return nil, nil
+ }
+
+ // Where all the results will go
+ var result []*Module
+
+ // Now go over all the types and their children in order to get
+ // all of the actual resources.
+ for _, item := range list.Items {
+ k := item.Keys[0].Token.Value().(string)
+
+ var listVal *ast.ObjectList
+ if ot, ok := item.Val.(*ast.ObjectType); ok {
+ listVal = ot.List
+ } else {
+ return nil, fmt.Errorf("module '%s': should be an object", k)
+ }
+
+ var config map[string]interface{}
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, fmt.Errorf(
+ "Error reading config for %s: %s",
+ k,
+ err)
+ }
+
+ // Remove the fields we handle specially
+ delete(config, "source")
+
+ rawConfig, err := NewRawConfig(config)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading config for %s: %s",
+ k,
+ err)
+ }
+
+ // If we have a source, then parse it out
+ var source string
+ if o := listVal.Filter("source"); len(o.Items) > 0 {
+ err = hcl.DecodeObject(&source, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing source for %s: %s",
+ k,
+ err)
+ }
+ }
+
+ result = append(result, &Module{
+ Name: k,
+ Source: source,
+ RawConfig: rawConfig,
+ })
+ }
+
+ return result, nil
+}
+
+// loadOutputsHcl recurses into the given HCL object and turns
+// it into a mapping of outputs.
+func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) {
+ list = list.Children()
+ if len(list.Items) == 0 {
+ return nil, fmt.Errorf(
+ "'output' must be followed by exactly one string: a name")
+ }
+
+ // Go through each object and turn it into an actual result.
+ result := make([]*Output, 0, len(list.Items))
+ for _, item := range list.Items {
+ n := item.Keys[0].Token.Value().(string)
+
+ var listVal *ast.ObjectList
+ if ot, ok := item.Val.(*ast.ObjectType); ok {
+ listVal = ot.List
+ } else {
+ return nil, fmt.Errorf("output '%s': should be an object", n)
+ }
+
+ var config map[string]interface{}
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, err
+ }
+
+ // Delete special keys
+ delete(config, "depends_on")
+
+ rawConfig, err := NewRawConfig(config)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading config for output %s: %s",
+ n,
+ err)
+ }
+
+ // If we have depends fields, then add those in
+ var dependsOn []string
+ if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
+ err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading depends_on for output %q: %s",
+ n,
+ err)
+ }
+ }
+
+ result = append(result, &Output{
+ Name: n,
+ RawConfig: rawConfig,
+ DependsOn: dependsOn,
+ })
+ }
+
+ return result, nil
+}
+
+// loadVariablesHcl recurses into the given HCL object and turns
+// it into a list of variables.
+func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) {
+ list = list.Children()
+ if len(list.Items) == 0 {
+ return nil, fmt.Errorf(
+ "'variable' must be followed by exactly one strings: a name")
+ }
+
+ // hclVariable is the structure each variable is decoded into
+ type hclVariable struct {
+ DeclaredType string `hcl:"type"`
+ Default interface{}
+ Description string
+ Fields []string `hcl:",decodedFields"`
+ }
+
+ // Go through each object and turn it into an actual result.
+ result := make([]*Variable, 0, len(list.Items))
+ for _, item := range list.Items {
+ // Clean up items from JSON
+ unwrapHCLObjectKeysFromJSON(item, 1)
+
+ // Verify the keys
+ if len(item.Keys) != 1 {
+ return nil, fmt.Errorf(
+ "position %s: 'variable' must be followed by exactly one strings: a name",
+ item.Pos())
+ }
+
+ n := item.Keys[0].Token.Value().(string)
+ if !NameRegexp.MatchString(n) {
+ return nil, fmt.Errorf(
+ "position %s: 'variable' name must match regular expression: %s",
+ item.Pos(), NameRegexp)
+ }
+
+ // Check for invalid keys
+ valid := []string{"type", "default", "description"}
+ if err := checkHCLKeys(item.Val, valid); err != nil {
+ return nil, multierror.Prefix(err, fmt.Sprintf(
+ "variable[%s]:", n))
+ }
+
+ // Decode into hclVariable to get typed values
+ var hclVar hclVariable
+ if err := hcl.DecodeObject(&hclVar, item.Val); err != nil {
+ return nil, err
+ }
+
+ // Defaults turn into a slice of map[string]interface{} and
+ // we need to make sure to convert that down into the
+ // proper type for Config.
+ if ms, ok := hclVar.Default.([]map[string]interface{}); ok {
+ def := make(map[string]interface{})
+ for _, m := range ms {
+ for k, v := range m {
+ def[k] = v
+ }
+ }
+
+ hclVar.Default = def
+ }
+
+ // Build the new variable and do some basic validation
+ newVar := &Variable{
+ Name: n,
+ DeclaredType: hclVar.DeclaredType,
+ Default: hclVar.Default,
+ Description: hclVar.Description,
+ }
+ if err := newVar.ValidateTypeAndDefault(); err != nil {
+ return nil, err
+ }
+
+ result = append(result, newVar)
+ }
+
+ return result, nil
+}
+
+// loadProvidersHcl recurses into the given HCL object and turns
+// it into a mapping of provider configs.
+func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
+ list = list.Children()
+ if len(list.Items) == 0 {
+ return nil, nil
+ }
+
+ // Go through each object and turn it into an actual result.
+ result := make([]*ProviderConfig, 0, len(list.Items))
+ for _, item := range list.Items {
+ n := item.Keys[0].Token.Value().(string)
+
+ var listVal *ast.ObjectList
+ if ot, ok := item.Val.(*ast.ObjectType); ok {
+ listVal = ot.List
+ } else {
+ return nil, fmt.Errorf("module '%s': should be an object", n)
+ }
+
+ var config map[string]interface{}
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, err
+ }
+
+ delete(config, "alias")
+
+ rawConfig, err := NewRawConfig(config)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading config for provider config %s: %s",
+ n,
+ err)
+ }
+
+ // If we have an alias field, then add those in
+ var alias string
+ if a := listVal.Filter("alias"); len(a.Items) > 0 {
+ err := hcl.DecodeObject(&alias, a.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading alias for provider[%s]: %s",
+ n,
+ err)
+ }
+ }
+
+ result = append(result, &ProviderConfig{
+ Name: n,
+ Alias: alias,
+ RawConfig: rawConfig,
+ })
+ }
+
+ return result, nil
+}
+
+// Given a handle to a HCL object, this recurses into the structure
+// and pulls out a list of data sources.
+//
+// The resulting data sources may not be unique, but each one
+// represents exactly one data definition in the HCL configuration.
+// We leave it up to another pass to merge them together.
+func loadDataResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
+ list = list.Children()
+ if len(list.Items) == 0 {
+ return nil, nil
+ }
+
+ // Where all the results will go
+ var result []*Resource
+
+ // Now go over all the types and their children in order to get
+ // all of the actual resources.
+ for _, item := range list.Items {
+ if len(item.Keys) != 2 {
+ return nil, fmt.Errorf(
+ "position %s: 'data' must be followed by exactly two strings: a type and a name",
+ item.Pos())
+ }
+
+ t := item.Keys[0].Token.Value().(string)
+ k := item.Keys[1].Token.Value().(string)
+
+ var listVal *ast.ObjectList
+ if ot, ok := item.Val.(*ast.ObjectType); ok {
+ listVal = ot.List
+ } else {
+ return nil, fmt.Errorf("data sources %s[%s]: should be an object", t, k)
+ }
+
+ var config map[string]interface{}
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, fmt.Errorf(
+ "Error reading config for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+
+ // Remove the fields we handle specially
+ delete(config, "depends_on")
+ delete(config, "provider")
+ delete(config, "count")
+
+ rawConfig, err := NewRawConfig(config)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading config for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+
+ // If we have a count, then figure it out
+ var count string = "1"
+ if o := listVal.Filter("count"); len(o.Items) > 0 {
+ err = hcl.DecodeObject(&count, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing count for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+ countConfig, err := NewRawConfig(map[string]interface{}{
+ "count": count,
+ })
+ if err != nil {
+ return nil, err
+ }
+ countConfig.Key = "count"
+
+ // If we have depends fields, then add those in
+ var dependsOn []string
+ if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
+ err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading depends_on for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+
+ // If we have a provider, then parse it out
+ var provider string
+ if o := listVal.Filter("provider"); len(o.Items) > 0 {
+ err := hcl.DecodeObject(&provider, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading provider for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+
+ result = append(result, &Resource{
+ Mode: DataResourceMode,
+ Name: k,
+ Type: t,
+ RawCount: countConfig,
+ RawConfig: rawConfig,
+ Provider: provider,
+ Provisioners: []*Provisioner{},
+ DependsOn: dependsOn,
+ Lifecycle: ResourceLifecycle{},
+ })
+ }
+
+ return result, nil
+}
+
+// Given a handle to a HCL object, this recurses into the structure
+// and pulls out a list of managed resources.
+//
+// The resulting resources may not be unique, but each resource
+// represents exactly one "resource" block in the HCL configuration.
+// We leave it up to another pass to merge them together.
+func loadManagedResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
+ list = list.Children()
+ if len(list.Items) == 0 {
+ return nil, nil
+ }
+
+ // Where all the results will go
+ var result []*Resource
+
+ // Now go over all the types and their children in order to get
+ // all of the actual resources.
+ for _, item := range list.Items {
+ // GH-4385: We detect a pure provisioner resource and give the user
+ // an error about how to do it cleanly.
+ if len(item.Keys) == 4 && item.Keys[2].Token.Value().(string) == "provisioner" {
+ return nil, fmt.Errorf(
+ "position %s: provisioners in a resource should be wrapped in a list\n\n"+
+ "Example: \"provisioner\": [ { \"local-exec\": ... } ]",
+ item.Pos())
+ }
+
+ // Fix up JSON input
+ unwrapHCLObjectKeysFromJSON(item, 2)
+
+ if len(item.Keys) != 2 {
+ return nil, fmt.Errorf(
+ "position %s: resource must be followed by exactly two strings, a type and a name",
+ item.Pos())
+ }
+
+ t := item.Keys[0].Token.Value().(string)
+ k := item.Keys[1].Token.Value().(string)
+
+ var listVal *ast.ObjectList
+ if ot, ok := item.Val.(*ast.ObjectType); ok {
+ listVal = ot.List
+ } else {
+ return nil, fmt.Errorf("resources %s[%s]: should be an object", t, k)
+ }
+
+ var config map[string]interface{}
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, fmt.Errorf(
+ "Error reading config for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+
+ // Remove the fields we handle specially
+ delete(config, "connection")
+ delete(config, "count")
+ delete(config, "depends_on")
+ delete(config, "provisioner")
+ delete(config, "provider")
+ delete(config, "lifecycle")
+
+ rawConfig, err := NewRawConfig(config)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading config for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+
+ // If we have a count, then figure it out
+ var count string = "1"
+ if o := listVal.Filter("count"); len(o.Items) > 0 {
+ err = hcl.DecodeObject(&count, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing count for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+ countConfig, err := NewRawConfig(map[string]interface{}{
+ "count": count,
+ })
+ if err != nil {
+ return nil, err
+ }
+ countConfig.Key = "count"
+
+ // If we have depends fields, then add those in
+ var dependsOn []string
+ if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
+ err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading depends_on for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+
+ // If we have connection info, then parse those out
+ var connInfo map[string]interface{}
+ if o := listVal.Filter("connection"); len(o.Items) > 0 {
+ err := hcl.DecodeObject(&connInfo, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading connection info for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+
+ // If we have provisioners, then parse those out
+ var provisioners []*Provisioner
+ if os := listVal.Filter("provisioner"); len(os.Items) > 0 {
+ var err error
+ provisioners, err = loadProvisionersHcl(os, connInfo)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading provisioners for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+
+ // If we have a provider, then parse it out
+ var provider string
+ if o := listVal.Filter("provider"); len(o.Items) > 0 {
+ err := hcl.DecodeObject(&provider, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading provider for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+
+ // Check if the resource should be re-created before
+ // destroying the existing instance
+ var lifecycle ResourceLifecycle
+ if o := listVal.Filter("lifecycle"); len(o.Items) > 0 {
+ if len(o.Items) > 1 {
+ return nil, fmt.Errorf(
+ "%s[%s]: Multiple lifecycle blocks found, expected one",
+ t, k)
+ }
+
+ // Check for invalid keys
+ valid := []string{"create_before_destroy", "ignore_changes", "prevent_destroy"}
+ if err := checkHCLKeys(o.Items[0].Val, valid); err != nil {
+ return nil, multierror.Prefix(err, fmt.Sprintf(
+ "%s[%s]:", t, k))
+ }
+
+ var raw map[string]interface{}
+ if err = hcl.DecodeObject(&raw, o.Items[0].Val); err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing lifecycle for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+
+ if err := mapstructure.WeakDecode(raw, &lifecycle); err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing lifecycle for %s[%s]: %s",
+ t,
+ k,
+ err)
+ }
+ }
+
+ result = append(result, &Resource{
+ Mode: ManagedResourceMode,
+ Name: k,
+ Type: t,
+ RawCount: countConfig,
+ RawConfig: rawConfig,
+ Provisioners: provisioners,
+ Provider: provider,
+ DependsOn: dependsOn,
+ Lifecycle: lifecycle,
+ })
+ }
+
+ return result, nil
+}
+
+func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) {
+ list = list.Children()
+ if len(list.Items) == 0 {
+ return nil, nil
+ }
+
+ // Go through each object and turn it into an actual result.
+ result := make([]*Provisioner, 0, len(list.Items))
+ for _, item := range list.Items {
+ n := item.Keys[0].Token.Value().(string)
+
+ var listVal *ast.ObjectList
+ if ot, ok := item.Val.(*ast.ObjectType); ok {
+ listVal = ot.List
+ } else {
+ return nil, fmt.Errorf("provisioner '%s': should be an object", n)
+ }
+
+ var config map[string]interface{}
+ if err := hcl.DecodeObject(&config, item.Val); err != nil {
+ return nil, err
+ }
+
+ // Parse the "when" value
+ when := ProvisionerWhenCreate
+ if v, ok := config["when"]; ok {
+ switch v {
+ case "create":
+ when = ProvisionerWhenCreate
+ case "destroy":
+ when = ProvisionerWhenDestroy
+ default:
+ return nil, fmt.Errorf(
+ "position %s: 'provisioner' when must be 'create' or 'destroy'",
+ item.Pos())
+ }
+ }
+
+ // Parse the "on_failure" value
+ onFailure := ProvisionerOnFailureFail
+ if v, ok := config["on_failure"]; ok {
+ switch v {
+ case "continue":
+ onFailure = ProvisionerOnFailureContinue
+ case "fail":
+ onFailure = ProvisionerOnFailureFail
+ default:
+ return nil, fmt.Errorf(
+ "position %s: 'provisioner' on_failure must be 'continue' or 'fail'",
+ item.Pos())
+ }
+ }
+
+ // Delete fields we special case
+ delete(config, "connection")
+ delete(config, "when")
+ delete(config, "on_failure")
+
+ rawConfig, err := NewRawConfig(config)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check if we have a provisioner-level connection
+ // block that overrides the resource-level
+ var subConnInfo map[string]interface{}
+ if o := listVal.Filter("connection"); len(o.Items) > 0 {
+ err := hcl.DecodeObject(&subConnInfo, o.Items[0].Val)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Inherit from the resource connInfo any keys
+ // that are not explicitly overridden.
+ if connInfo != nil && subConnInfo != nil {
+ for k, v := range connInfo {
+ if _, ok := subConnInfo[k]; !ok {
+ subConnInfo[k] = v
+ }
+ }
+ } else if subConnInfo == nil {
+ subConnInfo = connInfo
+ }
+
+ // Parse the connInfo
+ connRaw, err := NewRawConfig(subConnInfo)
+ if err != nil {
+ return nil, err
+ }
+
+ result = append(result, &Provisioner{
+ Type: n,
+ RawConfig: rawConfig,
+ ConnInfo: connRaw,
+ When: when,
+ OnFailure: onFailure,
+ })
+ }
+
+ return result, nil
+}
+
+/*
+func hclObjectMap(os *hclobj.Object) map[string]ast.ListNode {
+ objects := make(map[string][]*hclobj.Object)
+
+ for _, o := range os.Elem(false) {
+ for _, elem := range o.Elem(true) {
+ val, ok := objects[elem.Key]
+ if !ok {
+ val = make([]*hclobj.Object, 0, 1)
+ }
+
+ val = append(val, elem)
+ objects[elem.Key] = val
+ }
+ }
+
+ return objects
+}
+*/
+
+func checkHCLKeys(node ast.Node, valid []string) error {
+ var list *ast.ObjectList
+ switch n := node.(type) {
+ case *ast.ObjectList:
+ list = n
+ case *ast.ObjectType:
+ list = n.List
+ default:
+ return fmt.Errorf("cannot check HCL keys of type %T", n)
+ }
+
+ validMap := make(map[string]struct{}, len(valid))
+ for _, v := range valid {
+ validMap[v] = struct{}{}
+ }
+
+ var result error
+ for _, item := range list.Items {
+ key := item.Keys[0].Token.Value().(string)
+ if _, ok := validMap[key]; !ok {
+ result = multierror.Append(result, fmt.Errorf(
+ "invalid key: %s", key))
+ }
+ }
+
+ return result
+}
+
+// unwrapHCLObjectKeysFromJSON cleans up an edge case that can occur when
+// parsing JSON as input: if we're parsing JSON then directly nested
+// items will show up as additional "keys".
+//
+// For objects that expect a fixed number of keys, this breaks the
+// decoding process. This function unwraps the object into what it would've
+// looked like if it came directly from HCL by specifying the number of keys
+// you expect.
+//
+// Example:
+//
+// { "foo": { "baz": {} } }
+//
+// Will show up with Keys being: []string{"foo", "baz"}
+// when we really just want the first "depth" of them. This function fixes this.
+func unwrapHCLObjectKeysFromJSON(item *ast.ObjectItem, depth int) {
+ if len(item.Keys) > depth && item.Keys[0].Token.JSON {
+ for len(item.Keys) > depth {
+ // Pop off the last key
+ n := len(item.Keys)
+ key := item.Keys[n-1]
+ item.Keys[n-1] = nil
+ item.Keys = item.Keys[:n-1]
+
+ // Wrap our value in a list
+ item.Val = &ast.ObjectType{
+ List: &ast.ObjectList{
+ Items: []*ast.ObjectItem{
+ &ast.ObjectItem{
+ Keys: []*ast.ObjectKey{key},
+ Val: item.Val,
+ },
+ },
+ },
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/merge.go b/vendor/github.com/hashicorp/terraform/config/merge.go
new file mode 100644
index 00000000..db214be4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/merge.go
@@ -0,0 +1,193 @@
+package config
+
+// Merge merges two configurations into a single configuration.
+//
+// Merge allows for the two configurations to have duplicate resources,
+// because the resources will be merged. This differs from a single
+// Config which must only have unique resources.
+func Merge(c1, c2 *Config) (*Config, error) {
+ c := new(Config)
+
+ // Merge unknown keys
+ unknowns := make(map[string]struct{})
+ for _, k := range c1.unknownKeys {
+ _, present := unknowns[k]
+ if !present {
+ unknowns[k] = struct{}{}
+ c.unknownKeys = append(c.unknownKeys, k)
+ }
+ }
+ for _, k := range c2.unknownKeys {
+ _, present := unknowns[k]
+ if !present {
+ unknowns[k] = struct{}{}
+ c.unknownKeys = append(c.unknownKeys, k)
+ }
+ }
+
+ // Merge Atlas configuration. This is a simple "one overrides the
+ // other" sort of merge: c2 wins if it is set.
+ c.Atlas = c1.Atlas
+ if c2.Atlas != nil {
+ c.Atlas = c2.Atlas
+ }
+
+ // Merge the Terraform configuration
+ if c1.Terraform != nil {
+ c.Terraform = c1.Terraform
+ if c2.Terraform != nil {
+ c.Terraform.Merge(c2.Terraform)
+ }
+ } else {
+ c.Terraform = c2.Terraform
+ }
+
+ // NOTE: Everything below is pretty gross. Due to the lack of generics
+ // in Go, there is some hoop-jumping involved to make this merging a
+ // little more test-friendly and less repetitive. Ironically, making it
+ // less repetitive involves being a little repetitive, but I prefer to
+ // be repetitive with things that are less error prone than things that
+ // are more error prone (more logic). Type conversions to an interface
+ // are pretty low-error.
+
+ var m1, m2, mresult []merger
+
+ // Modules
+ m1 = make([]merger, 0, len(c1.Modules))
+ m2 = make([]merger, 0, len(c2.Modules))
+ for _, v := range c1.Modules {
+ m1 = append(m1, v)
+ }
+ for _, v := range c2.Modules {
+ m2 = append(m2, v)
+ }
+ mresult = mergeSlice(m1, m2)
+ if len(mresult) > 0 {
+ c.Modules = make([]*Module, len(mresult))
+ for i, v := range mresult {
+ c.Modules[i] = v.(*Module)
+ }
+ }
+
+ // Outputs
+ m1 = make([]merger, 0, len(c1.Outputs))
+ m2 = make([]merger, 0, len(c2.Outputs))
+ for _, v := range c1.Outputs {
+ m1 = append(m1, v)
+ }
+ for _, v := range c2.Outputs {
+ m2 = append(m2, v)
+ }
+ mresult = mergeSlice(m1, m2)
+ if len(mresult) > 0 {
+ c.Outputs = make([]*Output, len(mresult))
+ for i, v := range mresult {
+ c.Outputs[i] = v.(*Output)
+ }
+ }
+
+ // Provider Configs
+ m1 = make([]merger, 0, len(c1.ProviderConfigs))
+ m2 = make([]merger, 0, len(c2.ProviderConfigs))
+ for _, v := range c1.ProviderConfigs {
+ m1 = append(m1, v)
+ }
+ for _, v := range c2.ProviderConfigs {
+ m2 = append(m2, v)
+ }
+ mresult = mergeSlice(m1, m2)
+ if len(mresult) > 0 {
+ c.ProviderConfigs = make([]*ProviderConfig, len(mresult))
+ for i, v := range mresult {
+ c.ProviderConfigs[i] = v.(*ProviderConfig)
+ }
+ }
+
+ // Resources
+ m1 = make([]merger, 0, len(c1.Resources))
+ m2 = make([]merger, 0, len(c2.Resources))
+ for _, v := range c1.Resources {
+ m1 = append(m1, v)
+ }
+ for _, v := range c2.Resources {
+ m2 = append(m2, v)
+ }
+ mresult = mergeSlice(m1, m2)
+ if len(mresult) > 0 {
+ c.Resources = make([]*Resource, len(mresult))
+ for i, v := range mresult {
+ c.Resources[i] = v.(*Resource)
+ }
+ }
+
+ // Variables
+ m1 = make([]merger, 0, len(c1.Variables))
+ m2 = make([]merger, 0, len(c2.Variables))
+ for _, v := range c1.Variables {
+ m1 = append(m1, v)
+ }
+ for _, v := range c2.Variables {
+ m2 = append(m2, v)
+ }
+ mresult = mergeSlice(m1, m2)
+ if len(mresult) > 0 {
+ c.Variables = make([]*Variable, len(mresult))
+ for i, v := range mresult {
+ c.Variables[i] = v.(*Variable)
+ }
+ }
+
+ return c, nil
+}
+
+// merger is an interface that must be implemented by types that are
+// merge-able. This simplifies the implementation of Merge for the various
+// components of a Config.
+type merger interface {
+ mergerName() string
+ mergerMerge(merger) merger
+}
+
+// mergeSlice merges a slice of mergers.
+func mergeSlice(m1, m2 []merger) []merger {
+ r := make([]merger, len(m1), len(m1)+len(m2))
+ copy(r, m1)
+
+ m := map[string]struct{}{}
+ for _, v2 := range m2 {
+ // If we already saw it, just append it because it's a
+ // duplicate and invalid...
+ name := v2.mergerName()
+ if _, ok := m[name]; ok {
+ r = append(r, v2)
+ continue
+ }
+ m[name] = struct{}{}
+
+ // Find an original to override
+ var original merger
+ originalIndex := -1
+ for i, v := range m1 {
+ if v.mergerName() == name {
+ originalIndex = i
+ original = v
+ break
+ }
+ }
+
+ var v merger
+ if original == nil {
+ v = v2
+ } else {
+ v = original.mergerMerge(v2)
+ }
+
+ if originalIndex == -1 {
+ r = append(r, v)
+ } else {
+ r[originalIndex] = v
+ }
+ }
+
+ return r
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go b/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go
new file mode 100644
index 00000000..095f61d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go
@@ -0,0 +1,114 @@
+package module
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// copyDir copies the src directory contents into dst. Both directories
+// should already exist.
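+//
+// Illustrative call (a sketch; both paths are hypothetical and must
+// already exist):
+//
+//    if err := copyDir("/tmp/dst", "/tmp/src"); err != nil {
+//        // handle the copy failure
+//    }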
+func copyDir(dst, src string) error {
+ src, err := filepath.EvalSymlinks(src)
+ if err != nil {
+ return err
+ }
+
+ walkFn := func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if path == src {
+ return nil
+ }
+
+ if strings.HasPrefix(filepath.Base(path), ".") {
+ // Skip any dot files
+ if info.IsDir() {
+ return filepath.SkipDir
+ } else {
+ return nil
+ }
+ }
+
+ // The "path" has the src prefixed to it. We need to join our
+ // destination with the path without the src on it.
+ dstPath := filepath.Join(dst, path[len(src):])
+
+ // We don't want to try to copy the same file over itself.
+ if eq, err := sameFile(path, dstPath); eq {
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ // If we have a directory, make that subdirectory, then continue
+ // the walk.
+ if info.IsDir() {
+ if path == filepath.Join(src, dst) {
+ // dst is in src; don't walk it.
+ return nil
+ }
+
+ if err := os.MkdirAll(dstPath, 0755); err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ // If we have a file, copy the contents.
+ srcF, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ dstF, err := os.Create(dstPath)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ if _, err := io.Copy(dstF, srcF); err != nil {
+ return err
+ }
+
+ // Chmod it
+ return os.Chmod(dstPath, info.Mode())
+ }
+
+ return filepath.Walk(src, walkFn)
+}
+
+// sameFile tries to determine if two paths refer to the same file.
+// If the paths don't match, we look up the inode on supported systems.
+func sameFile(a, b string) (bool, error) {
+ if a == b {
+ return true, nil
+ }
+
+ aIno, err := inode(a)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+ }
+
+ bIno, err := inode(b)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+ }
+
+ if aIno > 0 && aIno == bIno {
+ return true, nil
+ }
+
+ return false, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/get.go b/vendor/github.com/hashicorp/terraform/config/module/get.go
new file mode 100644
index 00000000..96b4a63c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/get.go
@@ -0,0 +1,71 @@
+package module
+
+import (
+ "io/ioutil"
+ "os"
+
+ "github.com/hashicorp/go-getter"
+)
+
+// GetMode is an enum that describes how modules are loaded.
+//
+// GetModeNone says that modules will not be downloaded or updated; they will
+// only be loaded from storage.
+//
+// GetModeGet says that modules can be initially downloaded if they don't
+// exist, but are otherwise just loaded from the current version in storage.
+//
+// GetModeUpdate says that modules should be checked for updates and
+// downloaded prior to loading. If there are no updates, we load the version
+// from disk, otherwise we download first and then load.
+type GetMode byte
+
+const (
+ GetModeNone GetMode = iota
+ GetModeGet
+ GetModeUpdate
+)
+
+// GetCopy is the same as Get except that it downloads a copy of the
+// module represented by source.
+//
+// This copy will omit any dot-prefixed files (such as .git/, .hg/) and
+// can't be updated on its own.
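+//
+// Illustrative usage (a sketch; the destination path and go-getter
+// source URL are hypothetical):
+//
+//    if err := GetCopy("./module-copy", "github.com/hashicorp/example//subdir"); err != nil {
+//        // handle the download/copy failure
+//    }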
+func GetCopy(dst, src string) error {
+ // Create the temporary directory to do the real Get to
+ tmpDir, err := ioutil.TempDir("", "tf")
+ if err != nil {
+ return err
+ }
+ // FIXME: This isn't completely safe. Creating and then removing our temp
+ // path exposes a window in which an attacker could race to inject files.
+ if err := os.RemoveAll(tmpDir); err != nil {
+ return err
+ }
+ defer os.RemoveAll(tmpDir)
+
+ // Get to that temporary dir
+ if err := getter.Get(tmpDir, src); err != nil {
+ return err
+ }
+
+ // Make sure the destination exists
+ if err := os.MkdirAll(dst, 0755); err != nil {
+ return err
+ }
+
+ // Copy to the final location
+ return copyDir(dst, tmpDir)
+}
+
+func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) {
+ // Get the module with the level specified if we were told to.
+ if mode > GetModeNone {
+ if err := s.Get(key, src, mode == GetModeUpdate); err != nil {
+ return "", false, err
+ }
+ }
+
+ // Get the directory where the module is.
+ return s.Dir(key)
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode.go b/vendor/github.com/hashicorp/terraform/config/module/inode.go
new file mode 100644
index 00000000..8603ee26
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/inode.go
@@ -0,0 +1,21 @@
+// +build linux darwin openbsd netbsd solaris
+
+package module
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+)
+
+// inode looks up the inode of a file on POSIX systems.
+func inode(path string) (uint64, error) {
+ stat, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ if st, ok := stat.Sys().(*syscall.Stat_t); ok {
+ return st.Ino, nil
+ }
+ return 0, fmt.Errorf("could not determine file inode")
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go
new file mode 100644
index 00000000..0d95730d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go
@@ -0,0 +1,21 @@
+// +build freebsd
+
+package module
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+)
+
+// inode looks up the inode of a file on FreeBSD.
+func inode(path string) (uint64, error) {
+ stat, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ if st, ok := stat.Sys().(*syscall.Stat_t); ok {
+ return uint64(st.Ino), nil
+ }
+ return 0, fmt.Errorf("could not determine file inode")
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go b/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go
new file mode 100644
index 00000000..c0cf4553
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go
@@ -0,0 +1,8 @@
+// +build windows
+
+package module
+
+// no syscall.Stat_t on windows, return 0 for inodes
+func inode(path string) (uint64, error) {
+ return 0, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/module.go b/vendor/github.com/hashicorp/terraform/config/module/module.go
new file mode 100644
index 00000000..f8649f6e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/module.go
@@ -0,0 +1,7 @@
+package module
+
+// Module represents the metadata for a single module.
+type Module struct {
+ Name string
+ Source string
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/testing.go b/vendor/github.com/hashicorp/terraform/config/module/testing.go
new file mode 100644
index 00000000..fc9e7331
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/testing.go
@@ -0,0 +1,38 @@
+package module
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/hashicorp/go-getter"
+)
+
+// TestTree loads a module at the given path and returns the tree as well
+// as a function that should be deferred to clean up resources.
+func TestTree(t *testing.T, path string) (*Tree, func()) {
+ // Create a temporary directory for module storage
+ dir, err := ioutil.TempDir("", "tf")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ return nil, nil
+ }
+
+ // Load the module
+ mod, err := NewTreeModule("", path)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ return nil, nil
+ }
+
+ // Get the child modules
+ s := &getter.FolderStorage{StorageDir: dir}
+ if err := mod.Load(s, GetModeGet); err != nil {
+ t.Fatalf("err: %s", err)
+ return nil, nil
+ }
+
+ return mod, func() {
+ os.RemoveAll(dir)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree.go b/vendor/github.com/hashicorp/terraform/config/module/tree.go
new file mode 100644
index 00000000..b6f90fd9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/tree.go
@@ -0,0 +1,428 @@
+package module
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/go-getter"
+ "github.com/hashicorp/terraform/config"
+)
+
+// RootName is the name of the root tree.
+const RootName = "root"
+
+// Tree represents the module import tree of configurations.
+//
+// This Tree structure can be used to get (download) new modules, load
+// all the modules without getting, flatten the tree into something
+// Terraform can use, etc.
+type Tree struct {
+ name string
+ config *config.Config
+ children map[string]*Tree
+ path []string
+ lock sync.RWMutex
+}
+
+// NewTree returns a new Tree for the given config structure.
+func NewTree(name string, c *config.Config) *Tree {
+ return &Tree{config: c, name: name}
+}
+
+// NewEmptyTree returns a new tree that is empty (contains no configuration).
+func NewEmptyTree() *Tree {
+ t := &Tree{config: &config.Config{}}
+
+ // We do this dummy load so that the tree is marked as "loaded". It
+ // should never fail because this is effectively a no-op. If it does fail
+ // we panic so we know it's a bug.
+ if err := t.Load(nil, GetModeGet); err != nil {
+ panic(err)
+ }
+
+ return t
+}
+
+// NewTreeModule is like NewTree except it parses the configuration in
+// the directory and gives it a specific name. Use a blank name "" to specify
+// the root module.
+func NewTreeModule(name, dir string) (*Tree, error) {
+ c, err := config.LoadDir(dir)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewTree(name, c), nil
+}
+
+// Config returns the configuration for this module.
+func (t *Tree) Config() *config.Config {
+ return t.config
+}
+
+// Child returns the child with the given path (by name).
+func (t *Tree) Child(path []string) *Tree {
+ if t == nil {
+ return nil
+ }
+
+ if len(path) == 0 {
+ return t
+ }
+
+ c := t.Children()[path[0]]
+ if c == nil {
+ return nil
+ }
+
+ return c.Child(path[1:])
+}
+
+// Children returns the children of this tree (the modules that are
+// imported by this root).
+//
+// This will only return a non-nil value after Load is called.
+func (t *Tree) Children() map[string]*Tree {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+ return t.children
+}
+
+// Loaded says whether or not this tree has been loaded yet.
+func (t *Tree) Loaded() bool {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+ return t.children != nil
+}
+
+// Modules returns the list of modules that this tree imports.
+//
+// This is only the imports of _this_ level of the tree. To retrieve the
+// full nested imports, you'll have to traverse the tree.
+func (t *Tree) Modules() []*Module {
+ result := make([]*Module, len(t.config.Modules))
+ for i, m := range t.config.Modules {
+ result[i] = &Module{
+ Name: m.Name,
+ Source: m.Source,
+ }
+ }
+
+ return result
+}
+
+// Name returns the name of the tree. This will be RootName ("root") for the
+// root tree and the module name given for any children.
+func (t *Tree) Name() string {
+ if t.name == "" {
+ return RootName
+ }
+
+ return t.name
+}
+
+// Load loads the configuration of the entire tree.
+//
+// The parameters are used to tell the tree where to find modules and
+// whether it can download/update modules along the way.
+//
+// Calling this multiple times will reload the tree.
+//
+// Various semantic checks are made while loading, since module trees
+// inherently require the configuration to be in a reasonably sane state:
+// no circular dependencies, proper module sources, etc. A full suite of
+// validations can be done by running Validate (after loading).
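+//
+// Illustrative load sequence (a sketch; the config dir and storage dir
+// are hypothetical):
+//
+//    tree, err := NewTreeModule("", "./my-config")
+//    if err != nil {
+//        // handle the parse error
+//    }
+//    s := &getter.FolderStorage{StorageDir: ".terraform/modules"}
+//    if err := tree.Load(s, GetModeGet); err != nil {
+//        // handle the module fetch/load error
+//    }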
+func (t *Tree) Load(s getter.Storage, mode GetMode) error {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ // Reset the children if we have any
+ t.children = nil
+
+ modules := t.Modules()
+ children := make(map[string]*Tree)
+
+ // Go through all the modules and get the directory for them.
+ for _, m := range modules {
+ if _, ok := children[m.Name]; ok {
+ return fmt.Errorf(
+ "module %s: duplicated. module names must be unique", m.Name)
+ }
+
+ // Determine the path to this child
+ path := make([]string, len(t.path), len(t.path)+1)
+ copy(path, t.path)
+ path = append(path, m.Name)
+
+ // Split out the subdir if we have one
+ source, subDir := getter.SourceDirSubdir(m.Source)
+
+ source, err := getter.Detect(source, t.config.Dir, getter.Detectors)
+ if err != nil {
+ return fmt.Errorf("module %s: %s", m.Name, err)
+ }
+
+ // Check if the detector introduced something new.
+ source, subDir2 := getter.SourceDirSubdir(source)
+ if subDir2 != "" {
+ subDir = filepath.Join(subDir2, subDir)
+ }
+
+ // Get the directory where this module is so we can load it
+ key := strings.Join(path, ".")
+ key = fmt.Sprintf("root.%s-%s", key, m.Source)
+ dir, ok, err := getStorage(s, key, source, mode)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return fmt.Errorf(
+ "module %s: not found, may need to be downloaded using 'terraform get'", m.Name)
+ }
+
+ // If we have a subdirectory, then merge that in
+ if subDir != "" {
+ dir = filepath.Join(dir, subDir)
+ }
+
+ // Load the configuration from the directory resolved above.
+ children[m.Name], err = NewTreeModule(m.Name, dir)
+ if err != nil {
+ return fmt.Errorf(
+ "module %s: %s", m.Name, err)
+ }
+
+ // Set the path of this child
+ children[m.Name].path = path
+ }
+
+ // Go through all the children and load them.
+ for _, c := range children {
+ if err := c.Load(s, mode); err != nil {
+ return err
+ }
+ }
+
+ // Set our tree up
+ t.children = children
+
+ return nil
+}
+
+// Path is the full path to this tree.
+func (t *Tree) Path() []string {
+ return t.path
+}
+
+// String gives a nice output to describe the tree.
+func (t *Tree) String() string {
+ var result bytes.Buffer
+ path := strings.Join(t.path, ", ")
+ if path != "" {
+ path = fmt.Sprintf(" (path: %s)", path)
+ }
+ result.WriteString(t.Name() + path + "\n")
+
+ cs := t.Children()
+ if cs == nil {
+ result.WriteString(" not loaded")
+ } else {
+ // Go through each child and get its string value, then indent it
+ // by two.
+ for _, c := range cs {
+ r := strings.NewReader(c.String())
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ result.WriteString(" ")
+ result.WriteString(scanner.Text())
+ result.WriteString("\n")
+ }
+ }
+ }
+
+ return result.String()
+}
+
+// Validate does semantic checks on the entire tree of configurations.
+//
+// This will call the respective config.Config.Validate() functions as well
+// as verifying things such as parameters/outputs between the various modules.
+//
+// Load must be called prior to calling Validate or an error will be returned.
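+//
+// Illustrative usage after a successful Load (a sketch):
+//
+//    if err := tree.Validate(); err != nil {
+//        // err aggregates validation failures across the module tree
+//    }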
+func (t *Tree) Validate() error {
+ if !t.Loaded() {
+ return fmt.Errorf("tree must be loaded before calling Validate")
+ }
+
+ // If something goes wrong, here is our error template
+ newErr := &treeError{Name: []string{t.Name()}}
+
+ // Terraform core does not handle root module children named "root".
+ // We plan to fix this in the future but this bug was brought up in
+ // the middle of a release and we don't want to introduce wide-sweeping
+ // changes at that time.
+ if len(t.path) == 1 && t.name == "root" {
+ return fmt.Errorf("root module cannot contain module named 'root'")
+ }
+
+ // Validate our configuration first.
+ if err := t.config.Validate(); err != nil {
+ newErr.Add(err)
+ }
+
+ // If we're the root, we do extra validation. This validation usually
+ // requires the entire tree (since children don't have parent pointers).
+ if len(t.path) == 0 {
+ if err := t.validateProviderAlias(); err != nil {
+ newErr.Add(err)
+ }
+ }
+
+ // Get the child trees
+ children := t.Children()
+
+ // Validate all our children
+ for _, c := range children {
+ err := c.Validate()
+ if err == nil {
+ continue
+ }
+
+ verr, ok := err.(*treeError)
+ if !ok {
+ // Unknown error, just return...
+ return err
+ }
+
+ // Append ourselves to the error and then return
+ verr.Name = append(verr.Name, t.Name())
+ newErr.AddChild(verr)
+ }
+
+ // Go over all the modules and verify that any parameters are valid
+ // variables into the module in question.
+ for _, m := range t.config.Modules {
+ tree, ok := children[m.Name]
+ if !ok {
+ // This should never happen because Load populates children from t.config.Modules
+ panic("module not found in children: " + m.Name)
+ }
+
+ // Build the variables that the module defines
+ requiredMap := make(map[string]struct{})
+ varMap := make(map[string]struct{})
+ for _, v := range tree.config.Variables {
+ varMap[v.Name] = struct{}{}
+
+ if v.Required() {
+ requiredMap[v.Name] = struct{}{}
+ }
+ }
+
+ // Compare to the keys in our raw config for the module
+ for k := range m.RawConfig.Raw {
+ if _, ok := varMap[k]; !ok {
+ newErr.Add(fmt.Errorf(
+ "module %s: %s is not a valid parameter",
+ m.Name, k))
+ }
+
+ // Remove the required
+ delete(requiredMap, k)
+ }
+
+ // If we have any required left over, they aren't set.
+ for k := range requiredMap {
+ newErr.Add(fmt.Errorf(
+ "module %s: required variable %q not set",
+ m.Name, k))
+ }
+ }
+
+ // Go over all the variables used and make sure that any module
+ // variables represent outputs properly.
+ for source, vs := range t.config.InterpolatedVariables() {
+ for _, v := range vs {
+ mv, ok := v.(*config.ModuleVariable)
+ if !ok {
+ continue
+ }
+
+ tree, ok := children[mv.Name]
+ if !ok {
+ newErr.Add(fmt.Errorf(
+ "%s: undefined module referenced %s",
+ source, mv.Name))
+ continue
+ }
+
+ found := false
+ for _, o := range tree.config.Outputs {
+ if o.Name == mv.Field {
+ found = true
+ break
+ }
+ }
+ if !found {
+ newErr.Add(fmt.Errorf(
+ "%s: %s is not a valid output for module %s",
+ source, mv.Field, mv.Name))
+ }
+ }
+ }
+
+ return newErr.ErrOrNil()
+}
+
+// treeError is an error used by Tree.Validate to accumulate all
+// validation errors.
+type treeError struct {
+ Name []string
+ Errs []error
+ Children []*treeError
+}
+
+func (e *treeError) Add(err error) {
+ e.Errs = append(e.Errs, err)
+}
+
+func (e *treeError) AddChild(err *treeError) {
+ e.Children = append(e.Children, err)
+}
+
+func (e *treeError) ErrOrNil() error {
+ if len(e.Errs) > 0 || len(e.Children) > 0 {
+ return e
+ }
+ return nil
+}
+
+func (e *treeError) Error() string {
+ name := strings.Join(e.Name, ".")
+ var out bytes.Buffer
+ fmt.Fprintf(&out, "module %s: ", name)
+
+ if len(e.Errs) == 1 {
+ // single-line error
+ out.WriteString(e.Errs[0].Error())
+ } else {
+ // multi-line error
+ for _, err := range e.Errs {
+ fmt.Fprintf(&out, "\n %s", err)
+ }
+ }
+
+ if len(e.Children) > 0 {
+ // start the next error on a new line
+ out.WriteString("\n ")
+ }
+ for _, child := range e.Children {
+ out.WriteString(child.Error())
+ }
+
+ return out.String()
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go b/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go
new file mode 100644
index 00000000..fcd37f4e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go
@@ -0,0 +1,57 @@
+package module
+
+import (
+ "bytes"
+ "encoding/gob"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+func (t *Tree) GobDecode(bs []byte) error {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ // Decode the gob data
+ var data treeGob
+ dec := gob.NewDecoder(bytes.NewReader(bs))
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
+ // Set the fields
+ t.name = data.Name
+ t.config = data.Config
+ t.children = data.Children
+ t.path = data.Path
+
+ return nil
+}
+
+func (t *Tree) GobEncode() ([]byte, error) {
+ data := &treeGob{
+ Config: t.config,
+ Children: t.children,
+ Name: t.name,
+ Path: t.path,
+ }
+
+ var buf bytes.Buffer
+ enc := gob.NewEncoder(&buf)
+ if err := enc.Encode(data); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+// treeGob is used as a structure to Gob encode a tree.
+//
+// This structure is private so it can't be referenced but the fields are
+// public, allowing Gob to properly encode this. When we decode this, we are
+// able to turn it into a Tree.
+type treeGob struct {
+ Config *config.Config
+ Children map[string]*Tree
+ Name string
+ Path []string
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
new file mode 100644
index 00000000..090d4f7e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
@@ -0,0 +1,118 @@
+package module
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// validateProviderAlias validates that all provider alias references are
+// defined at some point in the parent tree. This improves UX by catching
+// alias typos at the slight cost of requiring a declaration of usage. This
+// is usually a good tradeoff since not many aliases are used.
+func (t *Tree) validateProviderAlias() error {
+ // If we're not the root, don't perform this validation. We must be the
+ // root since we require full tree visibility.
+ if len(t.path) != 0 {
+ return nil
+ }
+
+ // We'll use a graph to keep track of defined aliases at each level.
+ // As long as a parent defines an alias, it is okay.
+ var g dag.AcyclicGraph
+ t.buildProviderAliasGraph(&g, nil)
+
+ // Go through the graph and check that the usage is all good.
+ var err error
+ for _, v := range g.Vertices() {
+ pv, ok := v.(*providerAliasVertex)
+ if !ok {
+ // This shouldn't happen, just ignore it.
+ continue
+ }
+
+ // If we're not using any aliases, fast track and just continue
+ if len(pv.Used) == 0 {
+ continue
+ }
+
+ // Grab the ancestors since we're going to have to check if our
+ // parents define any of our aliases.
+ var parents []*providerAliasVertex
+ ancestors, _ := g.Ancestors(v)
+ for _, raw := range ancestors.List() {
+ if pv, ok := raw.(*providerAliasVertex); ok {
+ parents = append(parents, pv)
+ }
+ }
+ for k := range pv.Used {
+ // Check if we define this
+ if _, ok := pv.Defined[k]; ok {
+ continue
+ }
+
+ // Check for a parent
+ found := false
+ for _, parent := range parents {
+ _, found = parent.Defined[k]
+ if found {
+ break
+ }
+ }
+ if found {
+ continue
+ }
+
+ // We didn't find the alias, error!
+ err = multierror.Append(err, fmt.Errorf(
+ "module %s: provider alias must be defined by the module or a parent: %s",
+ strings.Join(pv.Path, "."), k))
+ }
+ }
+
+ return err
+}
+
+func (t *Tree) buildProviderAliasGraph(g *dag.AcyclicGraph, parent dag.Vertex) {
+ // Add all our defined aliases
+ defined := make(map[string]struct{})
+ for _, p := range t.config.ProviderConfigs {
+ defined[p.FullName()] = struct{}{}
+ }
+
+ // Add all our used aliases
+ used := make(map[string]struct{})
+ for _, r := range t.config.Resources {
+ if r.Provider != "" {
+ used[r.Provider] = struct{}{}
+ }
+ }
+
+ // Add it to the graph
+ vertex := &providerAliasVertex{
+ Path: t.Path(),
+ Defined: defined,
+ Used: used,
+ }
+ g.Add(vertex)
+
+ // Connect to our parent if we have one
+ if parent != nil {
+ g.Connect(dag.BasicEdge(vertex, parent))
+ }
+
+ // Build all our children
+ for _, c := range t.Children() {
+ c.buildProviderAliasGraph(g, vertex)
+ }
+}
+
+// providerAliasVertex is the vertex for the graph that keeps track of
+// defined provider aliases.
+type providerAliasVertex struct {
+ Path []string
+ Defined map[string]struct{}
+ Used map[string]struct{}
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go b/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go
new file mode 100644
index 00000000..00fd43fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go
@@ -0,0 +1,40 @@
+package config
+
+// ProvisionerWhen is an enum for valid values for when to run provisioners.
+type ProvisionerWhen int
+
+const (
+ ProvisionerWhenInvalid ProvisionerWhen = iota
+ ProvisionerWhenCreate
+ ProvisionerWhenDestroy
+)
+
+var provisionerWhenStrs = map[ProvisionerWhen]string{
+ ProvisionerWhenInvalid: "invalid",
+ ProvisionerWhenCreate: "create",
+ ProvisionerWhenDestroy: "destroy",
+}
+
+func (v ProvisionerWhen) String() string {
+ return provisionerWhenStrs[v]
+}
+
+// ProvisionerOnFailure is an enum for valid values for on_failure options
+// for provisioners.
+type ProvisionerOnFailure int
+
+const (
+ ProvisionerOnFailureInvalid ProvisionerOnFailure = iota
+ ProvisionerOnFailureContinue
+ ProvisionerOnFailureFail
+)
+
+var provisionerOnFailureStrs = map[ProvisionerOnFailure]string{
+ ProvisionerOnFailureInvalid: "invalid",
+ ProvisionerOnFailureContinue: "continue",
+ ProvisionerOnFailureFail: "fail",
+}
+
+func (v ProvisionerOnFailure) String() string {
+ return provisionerOnFailureStrs[v]
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/raw_config.go b/vendor/github.com/hashicorp/terraform/config/raw_config.go
new file mode 100644
index 00000000..f8498d85
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/raw_config.go
@@ -0,0 +1,335 @@
+package config
+
+import (
+ "bytes"
+ "encoding/gob"
+ "sync"
+
+ "github.com/hashicorp/hil"
+ "github.com/hashicorp/hil/ast"
+ "github.com/mitchellh/copystructure"
+ "github.com/mitchellh/reflectwalk"
+)
+
+// UnknownVariableValue is a sentinel value that can be used
+// to denote that the value of a variable is unknown at this time.
+// RawConfig uses this information to build up data about
+// unknown keys.
+const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
+
+// RawConfig is a structure that holds a piece of configuration
+// where the overall structure is unknown since it will be used
+// to configure a plugin or some other similar external component.
+//
+// RawConfigs can be interpolated with variables that come from
+// other resources, user variables, etc.
+//
+// RawConfig supports a query-like interface to request
+// information from deep within the structure.
+type RawConfig struct {
+ Key string
+ Raw map[string]interface{}
+ Interpolations []ast.Node
+ Variables map[string]InterpolatedVariable
+
+ lock sync.Mutex
+ config map[string]interface{}
+ unknownKeys []string
+}
+
+// NewRawConfig creates a new RawConfig structure and populates the
+// publicly readable struct fields.
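+//
+// Illustrative usage (a sketch; the key and interpolation are hypothetical):
+//
+//    raw, err := NewRawConfig(map[string]interface{}{
+//        "ami": "${var.ami}",
+//    })
+//    if err != nil {
+//        // handle the parse error
+//    }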
+func NewRawConfig(raw map[string]interface{}) (*RawConfig, error) {
+ result := &RawConfig{Raw: raw}
+ if err := result.init(); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// RawMap returns a copy of the RawConfig.Raw map.
+func (r *RawConfig) RawMap() map[string]interface{} {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ m := make(map[string]interface{})
+ for k, v := range r.Raw {
+ m[k] = v
+ }
+ return m
+}
+
+// Copy returns a copy of this RawConfig, uninterpolated.
+func (r *RawConfig) Copy() *RawConfig {
+ if r == nil {
+ return nil
+ }
+
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ newRaw := make(map[string]interface{})
+ for k, v := range r.Raw {
+ newRaw[k] = v
+ }
+
+ result, err := NewRawConfig(newRaw)
+ if err != nil {
+ panic("copy failed: " + err.Error())
+ }
+
+ result.Key = r.Key
+ return result
+}
+
+// Value returns the value of the configuration if this configuration
+// has a Key set. If this does not have a Key set, nil will be returned.
+func (r *RawConfig) Value() interface{} {
+ if c := r.Config(); c != nil {
+ if v, ok := c[r.Key]; ok {
+ return v
+ }
+ }
+
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ return r.Raw[r.Key]
+}
+
+// Config returns the entire configuration with the variables
+// interpolated from any call to Interpolate.
+//
+// If any interpolated variables are unknown (value set to
+// UnknownVariableValue), the first non-container (map, slice, etc.) element
+// will be removed from the config. The keys of unknown variables
+// can be found using the UnknownKeys function.
+//
+// By pruning out unknown keys from the configuration, the raw
+// structure will always successfully decode into its ultimate
+// structure using something like mapstructure.
+func (r *RawConfig) Config() map[string]interface{} {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ return r.config
+}
+
+// Interpolate uses the given mapping of variable values and uses
+// those as the values to replace any variables in this raw
+// configuration.
+//
+// Any prior calls to Interpolate are replaced with this one.
+//
+// If a variable key is missing, this will panic.
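+//
+// Illustrative usage (a sketch; the variable name and value are
+// hypothetical):
+//
+//    vars := map[string]ast.Variable{
+//        "var.ami": {Type: ast.TypeString, Value: "ami-123456"},
+//    }
+//    if err := raw.Interpolate(vars); err != nil {
+//        // handle the evaluation error
+//    }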
+func (r *RawConfig) Interpolate(vs map[string]ast.Variable) error {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ config := langEvalConfig(vs)
+ return r.interpolate(func(root ast.Node) (interface{}, error) {
+ // None of the variables we need are computed, meaning we should
+ // be able to properly evaluate.
+ result, err := hil.Eval(root, config)
+ if err != nil {
+ return "", err
+ }
+
+ return result.Value, nil
+ })
+}
+
+// Merge merges another RawConfig into this one (overriding any conflicting
+// values in this config) and returns a new config. The original config
+// is not modified.
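+//
+// Illustrative usage (a sketch; base and override are hypothetical):
+//
+//    merged := base.Merge(override) // override's keys win on conflict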
+func (r *RawConfig) Merge(other *RawConfig) *RawConfig {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ // Merge the raw configurations
+ raw := make(map[string]interface{})
+ for k, v := range r.Raw {
+ raw[k] = v
+ }
+ for k, v := range other.Raw {
+ raw[k] = v
+ }
+
+ // Create the result
+ result, err := NewRawConfig(raw)
+ if err != nil {
+ panic(err)
+ }
+
+ // Merge the interpolated results
+ result.config = make(map[string]interface{})
+ for k, v := range r.config {
+ result.config[k] = v
+ }
+ for k, v := range other.config {
+ result.config[k] = v
+ }
+
+ // Build the unknown keys
+ if len(r.unknownKeys) > 0 || len(other.unknownKeys) > 0 {
+ unknownKeys := make(map[string]struct{})
+ for _, k := range r.unknownKeys {
+ unknownKeys[k] = struct{}{}
+ }
+ for _, k := range other.unknownKeys {
+ unknownKeys[k] = struct{}{}
+ }
+
+ result.unknownKeys = make([]string, 0, len(unknownKeys))
+ for k := range unknownKeys {
+ result.unknownKeys = append(result.unknownKeys, k)
+ }
+ }
+
+ return result
+}
+
+func (r *RawConfig) init() error {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ r.config = r.Raw
+ r.Interpolations = nil
+ r.Variables = nil
+
+ fn := func(node ast.Node) (interface{}, error) {
+ r.Interpolations = append(r.Interpolations, node)
+ vars, err := DetectVariables(node)
+ if err != nil {
+ return "", err
+ }
+
+ for _, v := range vars {
+ if r.Variables == nil {
+ r.Variables = make(map[string]InterpolatedVariable)
+ }
+
+ r.Variables[v.FullKey()] = v
+ }
+
+ return "", nil
+ }
+
+ walker := &interpolationWalker{F: fn}
+ if err := reflectwalk.Walk(r.Raw, walker); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error {
+ config, err := copystructure.Copy(r.Raw)
+ if err != nil {
+ return err
+ }
+ r.config = config.(map[string]interface{})
+
+ w := &interpolationWalker{F: fn, Replace: true}
+ err = reflectwalk.Walk(r.config, w)
+ if err != nil {
+ return err
+ }
+
+ r.unknownKeys = w.unknownKeys
+ return nil
+}
+
+func (r *RawConfig) merge(r2 *RawConfig) *RawConfig {
+ if r == nil && r2 == nil {
+ return nil
+ }
+
+ if r == nil {
+ r = &RawConfig{}
+ }
+
+ rawRaw, err := copystructure.Copy(r.Raw)
+ if err != nil {
+ panic(err)
+ }
+
+ raw := rawRaw.(map[string]interface{})
+ if r2 != nil {
+ for k, v := range r2.Raw {
+ raw[k] = v
+ }
+ }
+
+ result, err := NewRawConfig(raw)
+ if err != nil {
+ panic(err)
+ }
+
+ return result
+}
+
+// UnknownKeys returns the keys of the configuration that are unknown
+// because they had interpolated variables that must be computed.
+func (r *RawConfig) UnknownKeys() []string {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ return r.unknownKeys
+}
+
+// See GobEncode
+func (r *RawConfig) GobDecode(b []byte) error {
+ var data gobRawConfig
+ err := gob.NewDecoder(bytes.NewReader(b)).Decode(&data)
+ if err != nil {
+ return err
+ }
+
+ r.Key = data.Key
+ r.Raw = data.Raw
+
+ return r.init()
+}
+
+// GobEncode is a custom Gob encoder to use so that we only include the
+// raw configuration. Interpolated variables and such are lost and the
+// tree of interpolated variables is recomputed on decode, since it is
+// referentially transparent.
+func (r *RawConfig) GobEncode() ([]byte, error) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ data := gobRawConfig{
+ Key: r.Key,
+ Raw: r.Raw,
+ }
+
+ var buf bytes.Buffer
+ if err := gob.NewEncoder(&buf).Encode(data); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+type gobRawConfig struct {
+ Key string
+ Raw map[string]interface{}
+}
+
+// langEvalConfig returns the evaluation configuration we use to execute.
+func langEvalConfig(vs map[string]ast.Variable) *hil.EvalConfig {
+ funcMap := make(map[string]ast.Function)
+ for k, v := range Funcs() {
+ funcMap[k] = v
+ }
+ funcMap["lookup"] = interpolationFuncLookup(vs)
+ funcMap["keys"] = interpolationFuncKeys(vs)
+ funcMap["values"] = interpolationFuncValues(vs)
+
+ return &hil.EvalConfig{
+ GlobalScope: &ast.BasicScope{
+ VarMap: vs,
+ FuncMap: funcMap,
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode.go b/vendor/github.com/hashicorp/terraform/config/resource_mode.go
new file mode 100644
index 00000000..877c6e84
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/resource_mode.go
@@ -0,0 +1,9 @@
+package config
+
+//go:generate stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go
+type ResourceMode int
+
+const (
+ ManagedResourceMode ResourceMode = iota
+ DataResourceMode
+)
diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
new file mode 100644
index 00000000..ea68b4fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT.
+
+package config
+
+import "fmt"
+
+const _ResourceMode_name = "ManagedResourceModeDataResourceMode"
+
+var _ResourceMode_index = [...]uint8{0, 19, 35}
+
+func (i ResourceMode) String() string {
+ if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) {
+ return fmt.Sprintf("ResourceMode(%d)", i)
+ }
+ return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]]
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/testing.go b/vendor/github.com/hashicorp/terraform/config/testing.go
new file mode 100644
index 00000000..f7bfadd9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/testing.go
@@ -0,0 +1,15 @@
+package config
+
+import (
+ "testing"
+)
+
+// TestRawConfig is used to create a RawConfig for testing.
+func TestRawConfig(t *testing.T, c map[string]interface{}) *RawConfig {
+ cfg, err := NewRawConfig(c)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ return cfg
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go
new file mode 100644
index 00000000..f8776bc5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/dag.go
@@ -0,0 +1,286 @@
+package dag
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/hashicorp/go-multierror"
+)
+
+// AcyclicGraph is a specialization of Graph that cannot have cycles. With
+// this property, we get sane graph traversal.
+type AcyclicGraph struct {
+ Graph
+}
+
+// WalkFunc is the callback used for walking the graph.
+type WalkFunc func(Vertex) error
+
+// DepthWalkFunc is a walk function that also receives the current depth of the
+// walk as an argument
+type DepthWalkFunc func(Vertex, int) error
+
+func (g *AcyclicGraph) DirectedGraph() Grapher {
+ return g
+}
+
+// Ancestors returns a Set that includes every Vertex yielded by walking down
+// from the provided starting Vertex v.
+func (g *AcyclicGraph) Ancestors(v Vertex) (*Set, error) {
+ s := new(Set)
+ start := AsVertexList(g.DownEdges(v))
+ memoFunc := func(v Vertex, d int) error {
+ s.Add(v)
+ return nil
+ }
+
+ if err := g.DepthFirstWalk(start, memoFunc); err != nil {
+ return nil, err
+ }
+
+ return s, nil
+}
+
+// Descendents returns a Set that includes every Vertex yielded by walking up
+// from the provided starting Vertex v.
+func (g *AcyclicGraph) Descendents(v Vertex) (*Set, error) {
+ s := new(Set)
+ start := AsVertexList(g.UpEdges(v))
+ memoFunc := func(v Vertex, d int) error {
+ s.Add(v)
+ return nil
+ }
+
+ if err := g.ReverseDepthFirstWalk(start, memoFunc); err != nil {
+ return nil, err
+ }
+
+ return s, nil
+}
+
+// Root returns the root of the DAG, or an error.
+//
+// Complexity: O(V)
+func (g *AcyclicGraph) Root() (Vertex, error) {
+ roots := make([]Vertex, 0, 1)
+ for _, v := range g.Vertices() {
+ if g.UpEdges(v).Len() == 0 {
+ roots = append(roots, v)
+ }
+ }
+
+ if len(roots) > 1 {
+ // TODO(mitchellh): make this error message a lot better
+ return nil, fmt.Errorf("multiple roots: %#v", roots)
+ }
+
+ if len(roots) == 0 {
+ return nil, fmt.Errorf("no roots found")
+ }
+
+ return roots[0], nil
+}
+
+// TransitiveReduction performs the transitive reduction of graph g in place.
+// The transitive reduction of a graph is a graph with as few edges as
+// possible with the same reachability as the original graph. This means
+// that if there are three nodes A => B => C, and A connects to both
+// B and C, and B connects to C, then the transitive reduction is the
+// same graph with only a single edge between A and B, and a single edge
+// between B and C.
+//
+// The graph must be valid for this operation to behave properly. If
+// Validate() returns an error, the behavior is undefined and the results
+// will likely be unexpected.
+//
+// Complexity: O(V(V+E)), or asymptotically O(VE)
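+//
+// Illustrative usage (a sketch; only reduce a graph that validates):
+//
+//    if err := g.Validate(); err == nil {
+//        g.TransitiveReduction()
+//    }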
+func (g *AcyclicGraph) TransitiveReduction() {
+ // For each vertex u in graph g, do a DFS starting from each vertex
+ // v such that the edge (u,v) exists (v is a direct descendant of u).
+ //
+ // For each v-prime reachable from v, remove the edge (u, v-prime).
+ defer g.debug.BeginOperation("TransitiveReduction", "").End("")
+
+ for _, u := range g.Vertices() {
+ uTargets := g.DownEdges(u)
+ vs := AsVertexList(g.DownEdges(u))
+
+ g.DepthFirstWalk(vs, func(v Vertex, d int) error {
+ shared := uTargets.Intersection(g.DownEdges(v))
+ for _, vPrime := range AsVertexList(shared) {
+ g.RemoveEdge(BasicEdge(u, vPrime))
+ }
+
+ return nil
+ })
+ }
+}
+
+// Validate validates the DAG. A DAG is valid if it has a single root
+// with no cycles.
+func (g *AcyclicGraph) Validate() error {
+ if _, err := g.Root(); err != nil {
+ return err
+ }
+
+ // Look for cycles of more than 1 component
+ var err error
+ cycles := g.Cycles()
+ if len(cycles) > 0 {
+ for _, cycle := range cycles {
+ cycleStr := make([]string, len(cycle))
+ for j, vertex := range cycle {
+ cycleStr[j] = VertexName(vertex)
+ }
+
+ err = multierror.Append(err, fmt.Errorf(
+ "Cycle: %s", strings.Join(cycleStr, ", ")))
+ }
+ }
+
+ // Look for cycles to self
+ for _, e := range g.Edges() {
+ if e.Source() == e.Target() {
+ err = multierror.Append(err, fmt.Errorf(
+ "Self reference: %s", VertexName(e.Source())))
+ }
+ }
+
+ return err
+}
+
+func (g *AcyclicGraph) Cycles() [][]Vertex {
+ var cycles [][]Vertex
+ for _, cycle := range StronglyConnected(&g.Graph) {
+ if len(cycle) > 1 {
+ cycles = append(cycles, cycle)
+ }
+ }
+ return cycles
+}
+
+// Walk walks the graph, calling your callback as each node is visited.
+// This will walk nodes in parallel if it can. Because the walk is done
+// in parallel, the error returned will be a multierror.
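+//
+// Illustrative usage (a sketch; the string vertices are hypothetical):
+//
+//    var g AcyclicGraph
+//    a := g.Add("a")
+//    b := g.Add("b")
+//    g.Connect(BasicEdge(a, b))
+//    err := g.Walk(func(v Vertex) error {
+//        // visit v
+//        return nil
+//    })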
+func (g *AcyclicGraph) Walk(cb WalkFunc) error {
+ defer g.debug.BeginOperation(typeWalk, "").End("")
+
+ w := &Walker{Callback: cb, Reverse: true}
+ w.Update(g)
+ return w.Wait()
+}
+
+// AsVertexList is a simple convenience helper for converting a dag.Set to
+// a []Vertex.
+func AsVertexList(s *Set) []Vertex {
+ rawList := s.List()
+ vertexList := make([]Vertex, len(rawList))
+ for i, raw := range rawList {
+ vertexList[i] = raw.(Vertex)
+ }
+ return vertexList
+}
+
+type vertexAtDepth struct {
+ Vertex Vertex
+ Depth int
+}
+
+// DepthFirstWalk does a depth-first walk of the graph starting from
+// the vertices in start.
+func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
+ defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("")
+
+ seen := make(map[Vertex]struct{})
+ frontier := make([]*vertexAtDepth, len(start))
+ for i, v := range start {
+ frontier[i] = &vertexAtDepth{
+ Vertex: v,
+ Depth: 0,
+ }
+ }
+ for len(frontier) > 0 {
+ // Pop the current vertex
+ n := len(frontier)
+ current := frontier[n-1]
+ frontier = frontier[:n-1]
+
+ // Check if we've seen this already and return...
+ if _, ok := seen[current.Vertex]; ok {
+ continue
+ }
+ seen[current.Vertex] = struct{}{}
+
+ // Visit the current node
+ if err := f(current.Vertex, current.Depth); err != nil {
+ return err
+ }
+
+ // Visit targets of this in a consistent order.
+ targets := AsVertexList(g.DownEdges(current.Vertex))
+ sort.Sort(byVertexName(targets))
+ for _, t := range targets {
+ frontier = append(frontier, &vertexAtDepth{
+ Vertex: t,
+ Depth: current.Depth + 1,
+ })
+ }
+ }
+
+ return nil
+}
+
+// ReverseDepthFirstWalk does a depth-first walk _up_ the graph starting from
+// the vertices in start.
+func (g *AcyclicGraph) ReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
+ defer g.debug.BeginOperation(typeReverseDepthFirstWalk, "").End("")
+
+ seen := make(map[Vertex]struct{})
+ frontier := make([]*vertexAtDepth, len(start))
+ for i, v := range start {
+ frontier[i] = &vertexAtDepth{
+ Vertex: v,
+ Depth: 0,
+ }
+ }
+ for len(frontier) > 0 {
+ // Pop the current vertex
+ n := len(frontier)
+ current := frontier[n-1]
+ frontier = frontier[:n-1]
+
+ // Check if we've seen this already and return...
+ if _, ok := seen[current.Vertex]; ok {
+ continue
+ }
+ seen[current.Vertex] = struct{}{}
+
+ // Add next set of targets in a consistent order.
+ targets := AsVertexList(g.UpEdges(current.Vertex))
+ sort.Sort(byVertexName(targets))
+ for _, t := range targets {
+ frontier = append(frontier, &vertexAtDepth{
+ Vertex: t,
+ Depth: current.Depth + 1,
+ })
+ }
+
+ // Visit the current node
+ if err := f(current.Vertex, current.Depth); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// byVertexName implements sort.Interface so a list of Vertices can be sorted
+// consistently by their VertexName
+type byVertexName []Vertex
+
+func (b byVertexName) Len() int { return len(b) }
+func (b byVertexName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byVertexName) Less(i, j int) bool {
+ return VertexName(b[i]) < VertexName(b[j])
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/dot.go b/vendor/github.com/hashicorp/terraform/dag/dot.go
new file mode 100644
index 00000000..7e6d2af3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/dot.go
@@ -0,0 +1,282 @@
+package dag
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// DotOpts are the options for generating a dot formatted Graph.
+type DotOpts struct {
+ // Allows some nodes to decide to only show themselves when the user has
+ // requested the "verbose" graph.
+ Verbose bool
+
+ // Highlight Cycles
+ DrawCycles bool
+
+ // How many levels to expand modules as we draw
+ MaxDepth int
+
+ // use this to keep the cluster_ naming convention from the previous dot writer
+ cluster bool
+}
+
+// GraphNodeDotter can be implemented by a node to cause it to be included
+// in the dot graph. The Dot method will be called which is expected to
+// return a representation of this node.
+type GraphNodeDotter interface {
+ // Dot is called to return the dot formatting for the node.
+ // The first parameter is the title of the node.
+ // The second parameter includes user-specified options that affect the dot
+ // graph. See DotOpts for details.
+ DotNode(string, *DotOpts) *DotNode
+}
+
+// DotNode provides a structure for Vertices to return in order to specify their
+// dot format.
+type DotNode struct {
+ Name string
+ Attrs map[string]string
+}
+
+// Dot returns the DOT representation of this Graph.
+func (g *marshalGraph) Dot(opts *DotOpts) []byte {
+ if opts == nil {
+ opts = &DotOpts{
+ DrawCycles: true,
+ MaxDepth: -1,
+ Verbose: true,
+ }
+ }
+
+ var w indentWriter
+ w.WriteString("digraph {\n")
+ w.Indent()
+
+ // some dot defaults
+ w.WriteString(`compound = "true"` + "\n")
+ w.WriteString(`newrank = "true"` + "\n")
+
+ // the top level graph is written as the first subgraph
+ w.WriteString(`subgraph "root" {` + "\n")
+ g.writeBody(opts, &w)
+
+ // cluster isn't really used other than for naming purposes in some graphs
+ opts.cluster = opts.MaxDepth != 0
+ maxDepth := opts.MaxDepth
+ if maxDepth == 0 {
+ maxDepth = -1
+ }
+
+ for _, s := range g.Subgraphs {
+ g.writeSubgraph(s, opts, maxDepth, &w)
+ }
+
+ w.Unindent()
+ w.WriteString("}\n")
+ return w.Bytes()
+}
+
+func (v *marshalVertex) dot(g *marshalGraph, opts *DotOpts) []byte {
+ var buf bytes.Buffer
+ graphName := g.Name
+ if graphName == "" {
+ graphName = "root"
+ }
+
+ name := v.Name
+ attrs := v.Attrs
+ if v.graphNodeDotter != nil {
+ node := v.graphNodeDotter.DotNode(name, opts)
+ if node == nil {
+ return []byte{}
+ }
+
+ newAttrs := make(map[string]string)
+ for k, v := range attrs {
+ newAttrs[k] = v
+ }
+ for k, v := range node.Attrs {
+ newAttrs[k] = v
+ }
+
+ name = node.Name
+ attrs = newAttrs
+ }
+
+ buf.WriteString(fmt.Sprintf(`"[%s] %s"`, graphName, name))
+ writeAttrs(&buf, attrs)
+ buf.WriteByte('\n')
+
+ return buf.Bytes()
+}
+
+func (e *marshalEdge) dot(g *marshalGraph) string {
+ var buf bytes.Buffer
+ graphName := g.Name
+ if graphName == "" {
+ graphName = "root"
+ }
+
+ sourceName := g.vertexByID(e.Source).Name
+ targetName := g.vertexByID(e.Target).Name
+ s := fmt.Sprintf(`"[%s] %s" -> "[%s] %s"`, graphName, sourceName, graphName, targetName)
+ buf.WriteString(s)
+ writeAttrs(&buf, e.Attrs)
+
+ return buf.String()
+}
+
+func cycleDot(e *marshalEdge, g *marshalGraph) string {
+ return e.dot(g) + ` [color = "red", penwidth = "2.0"]`
+}
+
+// writeSubgraph writes the subgraph body. This is recursive, and the depth
+// argument is used to record the current depth of iteration.
+func (g *marshalGraph) writeSubgraph(sg *marshalGraph, opts *DotOpts, depth int, w *indentWriter) {
+ if depth == 0 {
+ return
+ }
+ depth--
+
+ name := sg.Name
+ if opts.cluster {
+ // we prefix with cluster_ to match the old dot output
+ name = "cluster_" + name
+ sg.Attrs["label"] = sg.Name
+ }
+ w.WriteString(fmt.Sprintf("subgraph %q {\n", name))
+ sg.writeBody(opts, w)
+
+ for _, sg := range sg.Subgraphs {
+ g.writeSubgraph(sg, opts, depth, w)
+ }
+}
+
+func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) {
+ w.Indent()
+
+ for _, as := range attrStrings(g.Attrs) {
+ w.WriteString(as + "\n")
+ }
+
+ // list of Vertices that aren't to be included in the dot output
+ skip := map[string]bool{}
+
+ for _, v := range g.Vertices {
+ if v.graphNodeDotter == nil {
+ skip[v.ID] = true
+ continue
+ }
+
+ w.Write(v.dot(g, opts))
+ }
+
+ var dotEdges []string
+
+ if opts.DrawCycles {
+ for _, c := range g.Cycles {
+ if len(c) < 2 {
+ continue
+ }
+
+ for i, j := 0, 1; i < len(c); i, j = i+1, j+1 {
+ if j >= len(c) {
+ j = 0
+ }
+ src := c[i]
+ tgt := c[j]
+
+ if skip[src.ID] || skip[tgt.ID] {
+ continue
+ }
+
+ e := &marshalEdge{
+ Name: fmt.Sprintf("%s|%s", src.Name, tgt.Name),
+ Source: src.ID,
+ Target: tgt.ID,
+ Attrs: make(map[string]string),
+ }
+
+ dotEdges = append(dotEdges, cycleDot(e, g))
+ src = tgt
+ }
+ }
+ }
+
+ for _, e := range g.Edges {
+ dotEdges = append(dotEdges, e.dot(g))
+ }
+
+ // sort these again to match the old output
+ sort.Strings(dotEdges)
+
+ for _, e := range dotEdges {
+ w.WriteString(e + "\n")
+ }
+
+ w.Unindent()
+ w.WriteString("}\n")
+}
+
+func writeAttrs(buf *bytes.Buffer, attrs map[string]string) {
+ if len(attrs) > 0 {
+ buf.WriteString(" [")
+ buf.WriteString(strings.Join(attrStrings(attrs), ", "))
+ buf.WriteString("]")
+ }
+}
+
+func attrStrings(attrs map[string]string) []string {
+ strings := make([]string, 0, len(attrs))
+ for k, v := range attrs {
+ strings = append(strings, fmt.Sprintf("%s = %q", k, v))
+ }
+ sort.Strings(strings)
+ return strings
+}
+
+// indentWriter provides a bytes.Buffer-like structure that indents when
+// starting a new line.
+type indentWriter struct {
+ bytes.Buffer
+ level int
+}
+
+func (w *indentWriter) indent() {
+ newline := []byte("\n")
+ if !bytes.HasSuffix(w.Bytes(), newline) {
+ return
+ }
+ for i := 0; i < w.level; i++ {
+ w.Buffer.WriteString("\t")
+ }
+}
+
+// Indent increases indentation by 1
+func (w *indentWriter) Indent() { w.level++ }
+
+// Unindent decreases indentation by 1
+func (w *indentWriter) Unindent() { w.level-- }
+
+// The following methods intercept the bytes.Buffer writes and insert the
+// indentation when starting a new line.
+func (w *indentWriter) Write(b []byte) (int, error) {
+ w.indent()
+ return w.Buffer.Write(b)
+}
+
+func (w *indentWriter) WriteString(s string) (int, error) {
+ w.indent()
+ return w.Buffer.WriteString(s)
+}
+func (w *indentWriter) WriteByte(b byte) error {
+ w.indent()
+ return w.Buffer.WriteByte(b)
+}
+func (w *indentWriter) WriteRune(r rune) (int, error) {
+ w.indent()
+ return w.Buffer.WriteRune(r)
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/edge.go b/vendor/github.com/hashicorp/terraform/dag/edge.go
new file mode 100644
index 00000000..f0d99ee3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/edge.go
@@ -0,0 +1,37 @@
+package dag
+
+import (
+ "fmt"
+)
+
+// Edge represents an edge in the graph, with a source and target vertex.
+type Edge interface {
+ Source() Vertex
+ Target() Vertex
+
+ Hashable
+}
+
+// BasicEdge returns an Edge implementation that simply tracks the source
+// and target given as-is.
+func BasicEdge(source, target Vertex) Edge {
+ return &basicEdge{S: source, T: target}
+}
+
+// basicEdge is a basic implementation of Edge that has the source and
+// target vertex.
+type basicEdge struct {
+ S, T Vertex
+}
+
+func (e *basicEdge) Hashcode() interface{} {
+ return fmt.Sprintf("%p-%p", e.S, e.T)
+}
+
+func (e *basicEdge) Source() Vertex {
+ return e.S
+}
+
+func (e *basicEdge) Target() Vertex {
+ return e.T
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/graph.go b/vendor/github.com/hashicorp/terraform/dag/graph.go
new file mode 100644
index 00000000..e7517a20
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/graph.go
@@ -0,0 +1,391 @@
+package dag
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "sort"
+)
+
+// Graph is used to represent a dependency graph.
+type Graph struct {
+ vertices *Set
+ edges *Set
+ downEdges map[interface{}]*Set
+ upEdges map[interface{}]*Set
+
+ // JSON encoder for recording debug information
+ debug *encoder
+}
+
+// Subgrapher allows a Vertex to be a Graph itself, by returning a Grapher.
+type Subgrapher interface {
+ Subgraph() Grapher
+}
+
+// A Grapher is any type that returns a Grapher, mainly used to identify
+// dag.Graph and dag.AcyclicGraph. In the case of Graph and AcyclicGraph, they
+// return themselves.
+type Grapher interface {
+ DirectedGraph() Grapher
+}
+
+// Vertex of the graph.
+type Vertex interface{}
+
+// NamedVertex is an optional interface that can be implemented by Vertex
+// to give it a human-friendly name that is used for outputting the graph.
+type NamedVertex interface {
+ Vertex
+ Name() string
+}
+
+func (g *Graph) DirectedGraph() Grapher {
+ return g
+}
+
+// Vertices returns the list of all the vertices in the graph.
+func (g *Graph) Vertices() []Vertex {
+ list := g.vertices.List()
+ result := make([]Vertex, len(list))
+ for i, v := range list {
+ result[i] = v.(Vertex)
+ }
+
+ return result
+}
+
+// Edges returns the list of all the edges in the graph.
+func (g *Graph) Edges() []Edge {
+ list := g.edges.List()
+ result := make([]Edge, len(list))
+ for i, v := range list {
+ result[i] = v.(Edge)
+ }
+
+ return result
+}
+
+// EdgesFrom returns the list of edges from the given source.
+func (g *Graph) EdgesFrom(v Vertex) []Edge {
+ var result []Edge
+ from := hashcode(v)
+ for _, e := range g.Edges() {
+ if hashcode(e.Source()) == from {
+ result = append(result, e)
+ }
+ }
+
+ return result
+}
+
+// EdgesTo returns the list of edges to the given target.
+func (g *Graph) EdgesTo(v Vertex) []Edge {
+ var result []Edge
+ search := hashcode(v)
+ for _, e := range g.Edges() {
+ if hashcode(e.Target()) == search {
+ result = append(result, e)
+ }
+ }
+
+ return result
+}
+
+// HasVertex checks if the given Vertex is present in the graph.
+func (g *Graph) HasVertex(v Vertex) bool {
+ return g.vertices.Include(v)
+}
+
+// HasEdge checks if the given Edge is present in the graph.
+func (g *Graph) HasEdge(e Edge) bool {
+ return g.edges.Include(e)
+}
+
+// Add adds a vertex to the graph. This is safe to call multiple times with
+// the same Vertex.
+func (g *Graph) Add(v Vertex) Vertex {
+ g.init()
+ g.vertices.Add(v)
+ g.debug.Add(v)
+ return v
+}
+
+// Remove removes a vertex from the graph. This will also remove any
+// edges with this vertex as a source or target.
+func (g *Graph) Remove(v Vertex) Vertex {
+ // Delete the vertex itself
+ g.vertices.Delete(v)
+ g.debug.Remove(v)
+
+ // Delete the edges to non-existent things
+ for _, target := range g.DownEdges(v).List() {
+ g.RemoveEdge(BasicEdge(v, target))
+ }
+ for _, source := range g.UpEdges(v).List() {
+ g.RemoveEdge(BasicEdge(source, v))
+ }
+
+ return nil
+}
+
+// Replace replaces the original Vertex with replacement. If the original
+// does not exist within the graph, then false is returned. Otherwise, true
+// is returned.
+func (g *Graph) Replace(original, replacement Vertex) bool {
+ // If we don't have the original, we can't do anything
+ if !g.vertices.Include(original) {
+ return false
+ }
+
+ defer g.debug.BeginOperation("Replace", "").End("")
+
+ // If they're the same, then don't do anything
+ if original == replacement {
+ return true
+ }
+
+ // Add our new vertex, then copy all the edges
+ g.Add(replacement)
+ for _, target := range g.DownEdges(original).List() {
+ g.Connect(BasicEdge(replacement, target))
+ }
+ for _, source := range g.UpEdges(original).List() {
+ g.Connect(BasicEdge(source, replacement))
+ }
+
+ // Remove our old vertex, which will also remove all the edges
+ g.Remove(original)
+
+ return true
+}
+
+// RemoveEdge removes an edge from the graph.
+func (g *Graph) RemoveEdge(edge Edge) {
+ g.init()
+ g.debug.RemoveEdge(edge)
+
+ // Delete the edge from the set
+ g.edges.Delete(edge)
+
+ // Delete the up/down edges
+ if s, ok := g.downEdges[hashcode(edge.Source())]; ok {
+ s.Delete(edge.Target())
+ }
+ if s, ok := g.upEdges[hashcode(edge.Target())]; ok {
+ s.Delete(edge.Source())
+ }
+}
+
+// DownEdges returns the outward edges from the source Vertex v.
+func (g *Graph) DownEdges(v Vertex) *Set {
+ g.init()
+ return g.downEdges[hashcode(v)]
+}
+
+// UpEdges returns the inward edges to the destination Vertex v.
+func (g *Graph) UpEdges(v Vertex) *Set {
+ g.init()
+ return g.upEdges[hashcode(v)]
+}
+
+// Connect adds an edge with the given source and target. This is safe to
+// call multiple times with the same value. Note that the same value is
+// verified through pointer equality of the vertices, not through the
+// value of the edge itself.
+func (g *Graph) Connect(edge Edge) {
+ g.init()
+ g.debug.Connect(edge)
+
+ source := edge.Source()
+ target := edge.Target()
+ sourceCode := hashcode(source)
+ targetCode := hashcode(target)
+
+ // Do we have this already? If so, don't add it again.
+ if s, ok := g.downEdges[sourceCode]; ok && s.Include(target) {
+ return
+ }
+
+ // Add the edge to the set
+ g.edges.Add(edge)
+
+ // Add the down edge
+ s, ok := g.downEdges[sourceCode]
+ if !ok {
+ s = new(Set)
+ g.downEdges[sourceCode] = s
+ }
+ s.Add(target)
+
+ // Add the up edge
+ s, ok = g.upEdges[targetCode]
+ if !ok {
+ s = new(Set)
+ g.upEdges[targetCode] = s
+ }
+ s.Add(source)
+}
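+
+// A sketch tying Add and Connect together; the vertices are illustrative
+// strings, and the zero value of Graph is ready to use:
+//
+//    var g Graph
+//    g.Add("a")
+//    g.Add("b")
+//    g.Connect(BasicEdge("a", "b"))
+//    g.DownEdges("a").Include("b") // true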
+
+// StringWithNodeTypes is like String, but annotates each vertex and
+// dependency with its concrete Go type.
+func (g *Graph) StringWithNodeTypes() string {
+ var buf bytes.Buffer
+
+ // Build the list of node names and a mapping so that we can more
+ // easily alphabetize the output to remain deterministic.
+ vertices := g.Vertices()
+ names := make([]string, 0, len(vertices))
+ mapping := make(map[string]Vertex, len(vertices))
+ for _, v := range vertices {
+ name := VertexName(v)
+ names = append(names, name)
+ mapping[name] = v
+ }
+ sort.Strings(names)
+
+ // Write each node in order...
+ for _, name := range names {
+ v := mapping[name]
+ targets := g.downEdges[hashcode(v)]
+
+ buf.WriteString(fmt.Sprintf("%s - %T\n", name, v))
+
+ // Alphabetize dependencies
+ deps := make([]string, 0, targets.Len())
+ targetNodes := make(map[string]Vertex)
+ for _, target := range targets.List() {
+ dep := VertexName(target)
+ deps = append(deps, dep)
+ targetNodes[dep] = target
+ }
+ sort.Strings(deps)
+
+ // Write dependencies
+ for _, d := range deps {
+ buf.WriteString(fmt.Sprintf(" %s - %T\n", d, targetNodes[d]))
+ }
+ }
+
+ return buf.String()
+}
+
+// String outputs some human-friendly output for the graph structure.
+func (g *Graph) String() string {
+ var buf bytes.Buffer
+
+ // Build the list of node names and a mapping so that we can more
+ // easily alphabetize the output to remain deterministic.
+ vertices := g.Vertices()
+ names := make([]string, 0, len(vertices))
+ mapping := make(map[string]Vertex, len(vertices))
+ for _, v := range vertices {
+ name := VertexName(v)
+ names = append(names, name)
+ mapping[name] = v
+ }
+ sort.Strings(names)
+
+ // Write each node in order...
+ for _, name := range names {
+ v := mapping[name]
+ targets := g.downEdges[hashcode(v)]
+
+ buf.WriteString(fmt.Sprintf("%s\n", name))
+
+ // Alphabetize dependencies
+ deps := make([]string, 0, targets.Len())
+ for _, target := range targets.List() {
+ deps = append(deps, VertexName(target))
+ }
+ sort.Strings(deps)
+
+ // Write dependencies
+ for _, d := range deps {
+ buf.WriteString(fmt.Sprintf(" %s\n", d))
+ }
+ }
+
+ return buf.String()
+}
+
+func (g *Graph) init() {
+ if g.vertices == nil {
+ g.vertices = new(Set)
+ }
+ if g.edges == nil {
+ g.edges = new(Set)
+ }
+ if g.downEdges == nil {
+ g.downEdges = make(map[interface{}]*Set)
+ }
+ if g.upEdges == nil {
+ g.upEdges = make(map[interface{}]*Set)
+ }
+}
+
+// Dot returns a dot-formatted representation of the Graph.
+func (g *Graph) Dot(opts *DotOpts) []byte {
+ return newMarshalGraph("", g).Dot(opts)
+}
+
+// MarshalJSON returns a JSON representation of the entire Graph.
+func (g *Graph) MarshalJSON() ([]byte, error) {
+ dg := newMarshalGraph("root", g)
+ return json.MarshalIndent(dg, "", " ")
+}
+
+// SetDebugWriter sets the io.Writer where the Graph will record debug
+// information. After this is set, the graph will immediately encode itself to
+// the stream, and continue to record all subsequent operations.
+func (g *Graph) SetDebugWriter(w io.Writer) {
+ g.debug = &encoder{w: w}
+ g.debug.Encode(newMarshalGraph("root", g))
+}
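+
+// A sketch of wiring up the debug stream for some existing *Graph g; the
+// buffer is illustrative, any io.Writer works:
+//
+//    var buf bytes.Buffer
+//    g.SetDebugWriter(&buf)
+//    g.Add("c") // appended to buf as a JSON Transform entry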
+
+// DebugVertexInfo encodes arbitrary information about a vertex in the graph
+// debug logs.
+func (g *Graph) DebugVertexInfo(v Vertex, info string) {
+ va := newVertexInfo(typeVertexInfo, v, info)
+ g.debug.Encode(va)
+}
+
+// DebugEdgeInfo encodes arbitrary information about an edge in the graph debug
+// logs.
+func (g *Graph) DebugEdgeInfo(e Edge, info string) {
+ ea := newEdgeInfo(typeEdgeInfo, e, info)
+ g.debug.Encode(ea)
+}
+
+// DebugVisitInfo records a visit to a Vertex during a walk operation.
+func (g *Graph) DebugVisitInfo(v Vertex, info string) {
+ vi := newVertexInfo(typeVisitInfo, v, info)
+ g.debug.Encode(vi)
+}
+
+// DebugOperation marks the start of a set of graph transformations in
+// the debug log, and returns a DebugOperationEnd func, which marks the end of
+// the operation in the log. Additional information can be added to the log via
+// the info parameter.
+//
+// The returned func's End method allows this method to be called from a single
+// defer statement:
+// defer g.DebugOperation("OpName", "operating").End("")
+//
+// The returned function must be called to properly close the logical operation
+// in the logs.
+func (g *Graph) DebugOperation(operation string, info string) DebugOperationEnd {
+ return g.debug.BeginOperation(operation, info)
+}
+
+// VertexName returns the name of a vertex.
+func VertexName(raw Vertex) string {
+ switch v := raw.(type) {
+ case NamedVertex:
+ return v.Name()
+ case fmt.Stringer:
+ return fmt.Sprintf("%s", v)
+ default:
+ return fmt.Sprintf("%v", v)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/marshal.go b/vendor/github.com/hashicorp/terraform/dag/marshal.go
new file mode 100644
index 00000000..16d5dd6d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/marshal.go
@@ -0,0 +1,462 @@
+package dag
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+const (
+ typeOperation = "Operation"
+ typeTransform = "Transform"
+ typeWalk = "Walk"
+ typeDepthFirstWalk = "DepthFirstWalk"
+ typeReverseDepthFirstWalk = "ReverseDepthFirstWalk"
+ typeTransitiveReduction = "TransitiveReduction"
+ typeEdgeInfo = "EdgeInfo"
+ typeVertexInfo = "VertexInfo"
+ typeVisitInfo = "VisitInfo"
+)
+
+// the marshal* structs are for serialization of the graph data.
+type marshalGraph struct {
+ // Type is always "Graph", for identification as a top level object in the
+ // JSON stream.
+ Type string
+
+ // Each marshal structure requires a unique ID so that it can be referenced
+ // by other structures.
+ ID string `json:",omitempty"`
+
+ // Human readable name for this graph.
+ Name string `json:",omitempty"`
+
+ // Arbitrary attributes that can be added to the output.
+ Attrs map[string]string `json:",omitempty"`
+
+ // List of graph vertices, sorted by ID.
+ Vertices []*marshalVertex `json:",omitempty"`
+
+ // List of edges, sorted by Source ID.
+ Edges []*marshalEdge `json:",omitempty"`
+
+ // Any number of subgraphs. A subgraph itself is considered a vertex, and
+ // may be referenced by either end of an edge.
+ Subgraphs []*marshalGraph `json:",omitempty"`
+
+ // Any lists of vertices that are included in cycles.
+ Cycles [][]*marshalVertex `json:",omitempty"`
+}
+
+// The add, remove, connect, removeEdge methods mirror the basic Graph
+// manipulations to reconstruct a marshalGraph from a debug log.
+func (g *marshalGraph) add(v *marshalVertex) {
+ g.Vertices = append(g.Vertices, v)
+ sort.Sort(vertices(g.Vertices))
+}
+
+func (g *marshalGraph) remove(v *marshalVertex) {
+ for i, existing := range g.Vertices {
+ if v.ID == existing.ID {
+ g.Vertices = append(g.Vertices[:i], g.Vertices[i+1:]...)
+ return
+ }
+ }
+}
+
+func (g *marshalGraph) connect(e *marshalEdge) {
+ g.Edges = append(g.Edges, e)
+ sort.Sort(edges(g.Edges))
+}
+
+func (g *marshalGraph) removeEdge(e *marshalEdge) {
+ for i, existing := range g.Edges {
+ if e.Source == existing.Source && e.Target == existing.Target {
+ g.Edges = append(g.Edges[:i], g.Edges[i+1:]...)
+ return
+ }
+ }
+}
+
+func (g *marshalGraph) vertexByID(id string) *marshalVertex {
+ for _, v := range g.Vertices {
+ if id == v.ID {
+ return v
+ }
+ }
+ return nil
+}
+
+type marshalVertex struct {
+ // Unique ID, used to reference this vertex from other structures.
+ ID string
+
+ // Human readable name
+ Name string `json:",omitempty"`
+
+ Attrs map[string]string `json:",omitempty"`
+
+ // This is to help transition from the old Dot interfaces. We record if the
+ // node was a GraphNodeDotter here, so we can call it to get attributes.
+ graphNodeDotter GraphNodeDotter
+}
+
+func newMarshalVertex(v Vertex) *marshalVertex {
+ dn, ok := v.(GraphNodeDotter)
+ if !ok {
+ dn = nil
+ }
+
+ return &marshalVertex{
+ ID: marshalVertexID(v),
+ Name: VertexName(v),
+ Attrs: make(map[string]string),
+ graphNodeDotter: dn,
+ }
+}
+
+// vertices is a sort.Interface implementation for sorting vertices by Name
+type vertices []*marshalVertex
+
+func (v vertices) Less(i, j int) bool { return v[i].Name < v[j].Name }
+func (v vertices) Len() int { return len(v) }
+func (v vertices) Swap(i, j int) { v[i], v[j] = v[j], v[i] }
+
+type marshalEdge struct {
+ // Human readable name
+ Name string
+
+ // Source and Target Vertices by ID
+ Source string
+ Target string
+
+ Attrs map[string]string `json:",omitempty"`
+}
+
+func newMarshalEdge(e Edge) *marshalEdge {
+ return &marshalEdge{
+ Name: fmt.Sprintf("%s|%s", VertexName(e.Source()), VertexName(e.Target())),
+ Source: marshalVertexID(e.Source()),
+ Target: marshalVertexID(e.Target()),
+ Attrs: make(map[string]string),
+ }
+}
+
+// edges is a sort.Interface implementation for sorting edges by Name
+type edges []*marshalEdge
+
+func (e edges) Less(i, j int) bool { return e[i].Name < e[j].Name }
+func (e edges) Len() int { return len(e) }
+func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
+
+// build a marshalGraph structure from a *Graph
+func newMarshalGraph(name string, g *Graph) *marshalGraph {
+ mg := &marshalGraph{
+ Type: "Graph",
+ Name: name,
+ Attrs: make(map[string]string),
+ }
+
+ for _, v := range g.Vertices() {
+ id := marshalVertexID(v)
+ if sg, ok := marshalSubgrapher(v); ok {
+ smg := newMarshalGraph(VertexName(v), sg)
+ smg.ID = id
+ mg.Subgraphs = append(mg.Subgraphs, smg)
+ }
+
+ mv := newMarshalVertex(v)
+ mg.Vertices = append(mg.Vertices, mv)
+ }
+
+ sort.Sort(vertices(mg.Vertices))
+
+ for _, e := range g.Edges() {
+ mg.Edges = append(mg.Edges, newMarshalEdge(e))
+ }
+
+ sort.Sort(edges(mg.Edges))
+
+ for _, c := range (&AcyclicGraph{*g}).Cycles() {
+ var cycle []*marshalVertex
+ for _, v := range c {
+ mv := newMarshalVertex(v)
+ cycle = append(cycle, mv)
+ }
+ mg.Cycles = append(mg.Cycles, cycle)
+ }
+
+ return mg
+}
+
+// Attempt to return a unique ID for any vertex.
+func marshalVertexID(v Vertex) string {
+ val := reflect.ValueOf(v)
+ switch val.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+ return strconv.Itoa(int(val.Pointer()))
+ case reflect.Interface:
+ return strconv.Itoa(int(val.InterfaceData()[1]))
+ }
+
+ if v, ok := v.(Hashable); ok {
+ h := v.Hashcode()
+ if h, ok := h.(string); ok {
+ return h
+ }
+ }
+
+ // fallback to a name, which we hope is unique.
+ return VertexName(v)
+
+ // we could try harder by attempting to read the arbitrary value from the
+ // interface, but we shouldn't get here from terraform right now.
+}
+
+// check for a Subgrapher, and return the underlying *Graph.
+func marshalSubgrapher(v Vertex) (*Graph, bool) {
+ sg, ok := v.(Subgrapher)
+ if !ok {
+ return nil, false
+ }
+
+ switch g := sg.Subgraph().DirectedGraph().(type) {
+ case *Graph:
+ return g, true
+ case *AcyclicGraph:
+ return &g.Graph, true
+ }
+
+ return nil, false
+}
+
+// The DebugOperationEnd func type provides a way to call an End function via a
+// method call, allowing for the chaining of methods in a defer statement.
+type DebugOperationEnd func(string)
+
+// End calls function e with the info parameter, marking the end of this
+// operation in the logs.
+func (e DebugOperationEnd) End(info string) { e(info) }
+
+// encoder provides methods to write debug data to an io.Writer, and is a noop
+// when no writer is present
+type encoder struct {
+ sync.Mutex
+ w io.Writer
+}
+
+// Encode is analogous to json.Encoder.Encode
+func (e *encoder) Encode(i interface{}) {
+ if e == nil || e.w == nil {
+ return
+ }
+ e.Lock()
+ defer e.Unlock()
+
+ js, err := json.Marshal(i)
+ if err != nil {
+ log.Println("[ERROR] dag:", err)
+ return
+ }
+ js = append(js, '\n')
+
+ _, err = e.w.Write(js)
+ if err != nil {
+ log.Println("[ERROR] dag:", err)
+ return
+ }
+}
+
+func (e *encoder) Add(v Vertex) {
+ e.Encode(marshalTransform{
+ Type: typeTransform,
+ AddVertex: newMarshalVertex(v),
+ })
+}
+
+// Remove records the removal of Vertex v.
+func (e *encoder) Remove(v Vertex) {
+ e.Encode(marshalTransform{
+ Type: typeTransform,
+ RemoveVertex: newMarshalVertex(v),
+ })
+}
+
+func (e *encoder) Connect(edge Edge) {
+ e.Encode(marshalTransform{
+ Type: typeTransform,
+ AddEdge: newMarshalEdge(edge),
+ })
+}
+
+func (e *encoder) RemoveEdge(edge Edge) {
+ e.Encode(marshalTransform{
+ Type: typeTransform,
+ RemoveEdge: newMarshalEdge(edge),
+ })
+}
+
+// BeginOperation marks the start of a set of graph transformations, and
+// returns a DebugOperationEnd func to be called once the operation is complete.
+func (e *encoder) BeginOperation(op string, info string) DebugOperationEnd {
+ if e == nil {
+ return func(string) {}
+ }
+
+ e.Encode(marshalOperation{
+ Type: typeOperation,
+ Begin: op,
+ Info: info,
+ })
+
+ return func(info string) {
+ e.Encode(marshalOperation{
+ Type: typeOperation,
+ End: op,
+ Info: info,
+ })
+ }
+}
+
+// structure for recording graph transformations
+type marshalTransform struct {
+ // Type: "Transform"
+ Type string
+ AddEdge *marshalEdge `json:",omitempty"`
+ RemoveEdge *marshalEdge `json:",omitempty"`
+ AddVertex *marshalVertex `json:",omitempty"`
+ RemoveVertex *marshalVertex `json:",omitempty"`
+}
+
+func (t marshalTransform) Transform(g *marshalGraph) {
+ switch {
+ case t.AddEdge != nil:
+ g.connect(t.AddEdge)
+ case t.RemoveEdge != nil:
+ g.removeEdge(t.RemoveEdge)
+ case t.AddVertex != nil:
+ g.add(t.AddVertex)
+ case t.RemoveVertex != nil:
+ g.remove(t.RemoveVertex)
+ }
+}
+
+// this structure allows us to decode any object in the json stream for
+// inspection, then re-decode it into a proper struct if needed.
+type streamDecode struct {
+ Type string
+ Map map[string]interface{}
+ JSON []byte
+}
+
+func (s *streamDecode) UnmarshalJSON(d []byte) error {
+ s.JSON = d
+ err := json.Unmarshal(d, &s.Map)
+ if err != nil {
+ return err
+ }
+
+ if t, ok := s.Map["Type"]; ok {
+ s.Type, _ = t.(string)
+ }
+ return nil
+}
+
+// structure for recording the beginning and end of any multi-step
+// transformations. These are informational, and not required to reproduce the
+// graph state.
+type marshalOperation struct {
+ Type string
+ Begin string `json:",omitempty"`
+ End string `json:",omitempty"`
+ Info string `json:",omitempty"`
+}
+
+// decodeGraph decodes a marshalGraph from an encoded graph stream.
+func decodeGraph(r io.Reader) (*marshalGraph, error) {
+ dec := json.NewDecoder(r)
+
+ // a stream should always start with a graph
+ g := &marshalGraph{}
+
+ err := dec.Decode(g)
+ if err != nil {
+ return nil, err
+ }
+
+ // now replay any operations that occurred on the original graph
+ for dec.More() {
+ s := &streamDecode{}
+ err := dec.Decode(s)
+ if err != nil {
+ return g, err
+ }
+
+ // the only Type we're concerned with here is Transform to complete the
+ // Graph
+ if s.Type != typeTransform {
+ continue
+ }
+
+ t := &marshalTransform{}
+ err = json.Unmarshal(s.JSON, t)
+ if err != nil {
+ return g, err
+ }
+ t.Transform(g)
+ }
+ return g, nil
+}
+
+// marshalVertexInfo allows encoding arbitrary information about a single
+// Vertex in the logs. These are accumulated for informational display while
+// rebuilding the graph.
+type marshalVertexInfo struct {
+ Type string
+ Vertex *marshalVertex
+ Info string
+}
+
+func newVertexInfo(infoType string, v Vertex, info string) *marshalVertexInfo {
+ return &marshalVertexInfo{
+ Type: infoType,
+ Vertex: newMarshalVertex(v),
+ Info: info,
+ }
+}
+
+// marshalEdgeInfo allows encoding arbitrary information about a single
+// Edge in the logs. These are accumulated for informational display while
+// rebuilding the graph.
+type marshalEdgeInfo struct {
+ Type string
+ Edge *marshalEdge
+ Info string
+}
+
+func newEdgeInfo(infoType string, e Edge, info string) *marshalEdgeInfo {
+ return &marshalEdgeInfo{
+ Type: infoType,
+ Edge: newMarshalEdge(e),
+ Info: info,
+ }
+}
+
+// JSON2Dot reads a Graph debug log from an io.Reader, and converts the final
+// graph to dot format.
+//
+// TODO: Allow returning the output at a certain point during decode.
+// Encode extra information from the json log into the Dot.
+func JSON2Dot(r io.Reader) ([]byte, error) {
+ g, err := decodeGraph(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return g.Dot(nil), nil
+}
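+
+// A usage sketch (the file name is illustrative):
+//
+//    f, err := os.Open("graph-debug.json")
+//    if err != nil {
+//        return err
+//    }
+//    defer f.Close()
+//    dot, err := JSON2Dot(f)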
diff --git a/vendor/github.com/hashicorp/terraform/dag/set.go b/vendor/github.com/hashicorp/terraform/dag/set.go
new file mode 100644
index 00000000..3929c9d0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/set.go
@@ -0,0 +1,109 @@
+package dag
+
+import (
+ "sync"
+)
+
+// Set is a set data structure.
+type Set struct {
+ m map[interface{}]interface{}
+ once sync.Once
+}
+
+// Hashable is the interface used by set to get the hash code of a value.
+// If this isn't given, then the value of the item being added to the set
+// itself is used as the comparison value.
+type Hashable interface {
+ Hashcode() interface{}
+}
+
+// hashcode returns the hashcode used for set elements.
+func hashcode(v interface{}) interface{} {
+ if h, ok := v.(Hashable); ok {
+ return h.Hashcode()
+ }
+
+ return v
+}
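+
+// A sketch of a type that controls its own set identity; the type and its
+// field are illustrative:
+//
+//    type node struct{ id string }
+//
+//    func (n node) Hashcode() interface{} { return n.id }
+//
+// Two node values with the same id then collapse to a single set element.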
+
+// Add adds an item to the set
+func (s *Set) Add(v interface{}) {
+ s.once.Do(s.init)
+ s.m[hashcode(v)] = v
+}
+
+// Delete removes an item from the set.
+func (s *Set) Delete(v interface{}) {
+ s.once.Do(s.init)
+ delete(s.m, hashcode(v))
+}
+
+// Include returns true/false of whether a value is in the set.
+func (s *Set) Include(v interface{}) bool {
+ s.once.Do(s.init)
+ _, ok := s.m[hashcode(v)]
+ return ok
+}
+
+// Intersection computes the set intersection with other.
+func (s *Set) Intersection(other *Set) *Set {
+ result := new(Set)
+ if s == nil {
+ return result
+ }
+ if other != nil {
+ for _, v := range s.m {
+ if other.Include(v) {
+ result.Add(v)
+ }
+ }
+ }
+
+ return result
+}
+
+// Difference returns a set with the elements that s has but
+// other doesn't.
+func (s *Set) Difference(other *Set) *Set {
+ result := new(Set)
+ if s != nil {
+ for k, v := range s.m {
+ var ok bool
+ if other != nil {
+ _, ok = other.m[k]
+ }
+ if !ok {
+ result.Add(v)
+ }
+ }
+ }
+
+ return result
+}
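+
+// A sketch of the set operations (elements are illustrative strings):
+//
+//    var a, b Set
+//    a.Add("x")
+//    a.Add("y")
+//    b.Add("y")
+//    a.Intersection(&b).Include("y") // true
+//    a.Difference(&b).Include("x")   // true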
+
+// Len is the number of items in the set.
+func (s *Set) Len() int {
+ if s == nil {
+ return 0
+ }
+
+ return len(s.m)
+}
+
+// List returns the list of set elements.
+func (s *Set) List() []interface{} {
+ if s == nil {
+ return nil
+ }
+
+ r := make([]interface{}, 0, len(s.m))
+ for _, v := range s.m {
+ r = append(r, v)
+ }
+
+ return r
+}
+
+func (s *Set) init() {
+ s.m = make(map[interface{}]interface{})
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/tarjan.go b/vendor/github.com/hashicorp/terraform/dag/tarjan.go
new file mode 100644
index 00000000..9d8b25ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/tarjan.go
@@ -0,0 +1,107 @@
+package dag
+
+// StronglyConnected returns the list of strongly connected components
+// within the Graph g. This information is primarily used by this package
+// for cycle detection, but strongly connected components have widespread
+// use.
+func StronglyConnected(g *Graph) [][]Vertex {
+ vs := g.Vertices()
+ acct := sccAcct{
+ NextIndex: 1,
+ VertexIndex: make(map[Vertex]int, len(vs)),
+ }
+ for _, v := range vs {
+ // Recurse on any non-visited nodes
+ if acct.VertexIndex[v] == 0 {
+ stronglyConnected(&acct, g, v)
+ }
+ }
+ return acct.SCC
+}
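+
+// A sketch of cycle detection (vertices are illustrative strings):
+//
+//    var g Graph
+//    g.Add("a")
+//    g.Add("b")
+//    g.Connect(BasicEdge("a", "b"))
+//    g.Connect(BasicEdge("b", "a"))
+//    StronglyConnected(&g) // one component containing both "a" and "b"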
+
+func stronglyConnected(acct *sccAcct, g *Graph, v Vertex) int {
+ // Initial vertex visit
+ index := acct.visit(v)
+ minIdx := index
+
+ for _, raw := range g.DownEdges(v).List() {
+ target := raw.(Vertex)
+ targetIdx := acct.VertexIndex[target]
+
+ // Recurse on successor if not yet visited
+ if targetIdx == 0 {
+ minIdx = min(minIdx, stronglyConnected(acct, g, target))
+ } else if acct.inStack(target) {
+ // Check if the vertex is in the stack
+ minIdx = min(minIdx, targetIdx)
+ }
+ }
+
+ // Pop the strongly connected components off the stack if
+ // this is a root vertex
+ if index == minIdx {
+ var scc []Vertex
+ for {
+ v2 := acct.pop()
+ scc = append(scc, v2)
+ if v2 == v {
+ break
+ }
+ }
+
+ acct.SCC = append(acct.SCC, scc)
+ }
+
+ return minIdx
+}
+
+func min(a, b int) int {
+ if a <= b {
+ return a
+ }
+ return b
+}
+
+// sccAcct is used to pass around accounting information for
+// the StronglyConnected algorithm.
+type sccAcct struct {
+ NextIndex int
+ VertexIndex map[Vertex]int
+ Stack []Vertex
+ SCC [][]Vertex
+}
+
+// visit assigns an index and pushes a vertex onto the stack
+func (s *sccAcct) visit(v Vertex) int {
+ idx := s.NextIndex
+ s.VertexIndex[v] = idx
+ s.NextIndex++
+ s.push(v)
+ return idx
+}
+
+// push adds a vertex to the stack
+func (s *sccAcct) push(n Vertex) {
+ s.Stack = append(s.Stack, n)
+}
+
+// pop removes a vertex from the stack
+func (s *sccAcct) pop() Vertex {
+ n := len(s.Stack)
+ if n == 0 {
+ return nil
+ }
+ vertex := s.Stack[n-1]
+ s.Stack = s.Stack[:n-1]
+ return vertex
+}
+
+// inStack checks if a vertex is in the stack
+func (s *sccAcct) inStack(needle Vertex) bool {
+ for _, n := range s.Stack {
+ if n == needle {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go
new file mode 100644
index 00000000..a74f1142
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/walk.go
@@ -0,0 +1,445 @@
+package dag
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/go-multierror"
+)
+
+// Walker is used to walk every vertex of a graph in parallel.
+//
+// A vertex will only be walked when the dependencies of that vertex have
+// been walked. If two vertices can be walked at the same time, they will be.
+//
+// Update can be called to update the graph. This can be called even during
+// a walk, changing vertices/edges mid-walk. This should be done carefully.
+// If a vertex is removed but has already been executed, the result of that
+// execution (any error) is still returned by Wait. Changing or re-adding
+// a vertex that has already executed has no effect. Changing edges of
+// a vertex that has already executed has no effect.
+//
+// Non-parallelism can be enforced by introducing a lock in your callback
+// function. However, the goroutine overhead of a walk will remain.
+// Walker will create V*2 goroutines (one for each vertex, and a dependency
+// waiter for each vertex). In general this should be of no concern unless
+// there are a huge number of vertices.
+//
+// The walk is depth first by default. This can be changed with the Reverse
+// option.
+//
+// A single walker is only valid for one graph walk. After the walk is complete
+// you must construct a new walker to walk again. State for the walk is never
+// deleted in case vertices or edges are changed.
+type Walker struct {
+ // Callback is what is called for each vertex
+ Callback WalkFunc
+
+ // Reverse, if true, causes the source of an edge to depend on a target.
+ // When false (default), the target depends on the source.
+ Reverse bool
+
+ // changeLock must be held to modify any of the fields below. Only Update
+ // should modify these fields. Modifying them outside of Update can cause
+ // serious problems.
+ changeLock sync.Mutex
+ vertices Set
+ edges Set
+ vertexMap map[Vertex]*walkerVertex
+
+ // wait is done when all vertices have executed. It may become "undone"
+ // if new vertices are added.
+ wait sync.WaitGroup
+
+ // errMap contains the errors recorded so far for execution. Reading
+ // and writing should hold errLock.
+ errMap map[Vertex]error
+ errLock sync.Mutex
+}
+
+type walkerVertex struct {
+ // These should only be set once on initialization and never written again.
+ // They are not protected by a lock since they don't need to be since
+ // they are write-once.
+
+ // DoneCh is closed when this vertex has completed execution, regardless
+ // of success.
+ //
+ // CancelCh is closed when the vertex should cancel execution. If execution
+ // is already complete (DoneCh is closed), this has no effect. Otherwise,
+ // execution is cancelled as quickly as possible.
+ DoneCh chan struct{}
+ CancelCh chan struct{}
+
+ // Dependency information. Any changes to any of these fields requires
+ // holding DepsLock.
+ //
+ // DepsCh is sent a single value that denotes whether the upstream deps
+ // were successful (no errors). Any value sent means that the upstream
+ // dependencies are complete. No other values will ever be sent again.
+ //
+ // DepsUpdateCh is closed when there is a new DepsCh set.
+ DepsCh chan bool
+ DepsUpdateCh chan struct{}
+ DepsLock sync.Mutex
+
+ // Below is not safe to read/write in parallel. This behavior is
+ // enforced by changes only happening in Update. Nothing else should
+ // ever modify these.
+ deps map[Vertex]chan struct{}
+ depsCancelCh chan struct{}
+}
+
+// errWalkUpstream is used in the errMap of a walk to note that an upstream
+// dependency failed so this vertex wasn't run. This is not shown in the final
+// user-returned error.
+var errWalkUpstream = errors.New("upstream dependency failed")
+
+// Wait waits for the completion of the walk and returns any errors
+// (in the form of a multierror) that occurred. Update should be called
+// to populate the walk with vertices and edges prior to calling this.
+//
+// Wait will return as soon as all currently known vertices are complete.
+// If you plan on calling Update with more vertices in the future, you
+// should not call Wait until after this is done.
+func (w *Walker) Wait() error {
+ // Wait for completion
+ w.wait.Wait()
+
+ // Grab the error lock
+ w.errLock.Lock()
+ defer w.errLock.Unlock()
+
+ // Build the error
+ var result error
+ for v, err := range w.errMap {
+ if err != nil && err != errWalkUpstream {
+ result = multierror.Append(result, fmt.Errorf(
+ "%s: %s", VertexName(v), err))
+ }
+ }
+
+ return result
+}
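+
+// A sketch of a complete walk; the callback is illustrative, and WalkFunc
+// (defined elsewhere in this package) is assumed to be func(Vertex) error:
+//
+//    var g AcyclicGraph
+//    g.Add("a")
+//    w := &Walker{Callback: func(v Vertex) error {
+//        log.Printf("visiting %s", VertexName(v))
+//        return nil
+//    }}
+//    w.Update(&g)
+//    err := w.Wait()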
+
+// Update updates the currently executing walk with the given graph.
+// This will perform a diff of the vertices and edges and update the walker.
+// Already completed vertices remain completed (including any errors during
+// their execution).
+//
+// This returns immediately once the walker is updated; it does not wait
+// for completion of the walk.
+//
+// Multiple Updates can be called in parallel. Update can be called at any
+// time during a walk.
+func (w *Walker) Update(g *AcyclicGraph) {
+ var v, e *Set
+ if g != nil {
+ v, e = g.vertices, g.edges
+ }
+
+ // Grab the change lock so no more updates happen but also so that
+ // no new vertices are executed during this time since we may be
+ // removing them.
+ w.changeLock.Lock()
+ defer w.changeLock.Unlock()
+
+ // Initialize fields
+ if w.vertexMap == nil {
+ w.vertexMap = make(map[Vertex]*walkerVertex)
+ }
+
+ // Calculate all our sets
+ newEdges := e.Difference(&w.edges)
+ oldEdges := w.edges.Difference(e)
+ newVerts := v.Difference(&w.vertices)
+ oldVerts := w.vertices.Difference(v)
+
+ // Add the new vertices
+ for _, raw := range newVerts.List() {
+ v := raw.(Vertex)
+
+ // Add to the waitgroup so our walk is not done until everything finishes
+ w.wait.Add(1)
+
+ // Add to our own set so we know about it already
+ log.Printf("[DEBUG] dag/walk: added new vertex: %q", VertexName(v))
+ w.vertices.Add(raw)
+
+ // Initialize the vertex info
+ info := &walkerVertex{
+ DoneCh: make(chan struct{}),
+ CancelCh: make(chan struct{}),
+ deps: make(map[Vertex]chan struct{}),
+ }
+
+ // Add it to the map and kick off the walk
+ w.vertexMap[v] = info
+ }
+
+ // Remove the old vertices
+ for _, raw := range oldVerts.List() {
+ v := raw.(Vertex)
+
+ // Get the vertex info so we can cancel it
+ info, ok := w.vertexMap[v]
+ if !ok {
+ // This vertex for some reason was never in our map. This
+ // shouldn't be possible.
+ continue
+ }
+
+ // Cancel the vertex
+ close(info.CancelCh)
+
+ // Delete it out of the map
+ delete(w.vertexMap, v)
+
+ log.Printf("[DEBUG] dag/walk: removed vertex: %q", VertexName(v))
+ w.vertices.Delete(raw)
+ }
+
+ // Add the new edges
+ var changedDeps Set
+ for _, raw := range newEdges.List() {
+ edge := raw.(Edge)
+ waiter, dep := w.edgeParts(edge)
+
+ // Get the info for the waiter
+ waiterInfo, ok := w.vertexMap[waiter]
+ if !ok {
+ // Vertex doesn't exist... shouldn't be possible but ignore.
+ continue
+ }
+
+ // Get the info for the dep
+ depInfo, ok := w.vertexMap[dep]
+ if !ok {
+ // Vertex doesn't exist... shouldn't be possible but ignore.
+ continue
+ }
+
+ // Add the dependency to our waiter
+ waiterInfo.deps[dep] = depInfo.DoneCh
+
+ // Record that the deps changed for this waiter
+ changedDeps.Add(waiter)
+
+ log.Printf(
+ "[DEBUG] dag/walk: added edge: %q waiting on %q",
+ VertexName(waiter), VertexName(dep))
+ w.edges.Add(raw)
+ }
+
+ // Process removed edges
+ for _, raw := range oldEdges.List() {
+ edge := raw.(Edge)
+ waiter, dep := w.edgeParts(edge)
+
+ // Get the info for the waiter
+ waiterInfo, ok := w.vertexMap[waiter]
+ if !ok {
+ // Vertex doesn't exist... shouldn't be possible but ignore.
+ continue
+ }
+
+ // Delete the dependency from the waiter
+ delete(waiterInfo.deps, dep)
+
+ // Record that the deps changed for this waiter
+ changedDeps.Add(waiter)
+
+ log.Printf(
+ "[DEBUG] dag/walk: removed edge: %q waiting on %q",
+ VertexName(waiter), VertexName(dep))
+ w.edges.Delete(raw)
+ }
+
+ // For each vertex with changed dependencies, we need to kick off
+ // a new waiter and notify the vertex of the changes.
+ for _, raw := range changedDeps.List() {
+ v := raw.(Vertex)
+ info, ok := w.vertexMap[v]
+ if !ok {
+ // Vertex doesn't exist... shouldn't be possible but ignore.
+ continue
+ }
+
+ // Create a new done channel
+ doneCh := make(chan bool, 1)
+
+ // Create the channel we close for cancellation
+ cancelCh := make(chan struct{})
+
+ // Build a new deps copy
+ deps := make(map[Vertex]<-chan struct{})
+ for k, v := range info.deps {
+ deps[k] = v
+ }
+
+ // Update the update channel
+ info.DepsLock.Lock()
+ if info.DepsUpdateCh != nil {
+ close(info.DepsUpdateCh)
+ }
+ info.DepsCh = doneCh
+ info.DepsUpdateCh = make(chan struct{})
+ info.DepsLock.Unlock()
+
+ // Cancel the older waiter
+ if info.depsCancelCh != nil {
+ close(info.depsCancelCh)
+ }
+ info.depsCancelCh = cancelCh
+
+ log.Printf(
+ "[DEBUG] dag/walk: dependencies changed for %q, sending new deps",
+ VertexName(v))
+
+ // Start the waiter
+ go w.waitDeps(v, deps, doneCh, cancelCh)
+ }
+
+ // Start all the new vertices. We do this at the end so that all
+ // the edge waiters and changes are setup above.
+ for _, raw := range newVerts.List() {
+ v := raw.(Vertex)
+ go w.walkVertex(v, w.vertexMap[v])
+ }
+}
+
+// edgeParts returns the waiter and the dependency, in that order.
+// The waiter is waiting on the dependency.
+func (w *Walker) edgeParts(e Edge) (Vertex, Vertex) {
+ if w.Reverse {
+ return e.Source(), e.Target()
+ }
+
+ return e.Target(), e.Source()
+}
+
+// walkVertex walks a single vertex, waiting for any dependencies before
+// executing the callback.
+func (w *Walker) walkVertex(v Vertex, info *walkerVertex) {
+ // When we're done executing, lower the waitgroup count
+ defer w.wait.Done()
+
+ // When we're done, always close our done channel
+ defer close(info.DoneCh)
+
+ // Wait for our dependencies. We create a [closed] deps channel so
+ // that we can immediately fall through to load our actual DepsCh.
+ var depsSuccess bool
+ var depsUpdateCh chan struct{}
+ depsCh := make(chan bool, 1)
+ depsCh <- true
+ close(depsCh)
+ for {
+ select {
+ case <-info.CancelCh:
+ // Cancel
+ return
+
+ case depsSuccess = <-depsCh:
+ // Deps complete! Mark as nil to trigger completion handling.
+ depsCh = nil
+
+ case <-depsUpdateCh:
+ // New deps, reloop
+ }
+
+ // Check if we have updated dependencies. This can happen if the
+ // dependencies were satisfied exactly prior to an Update occurring.
+ // In that case, we'd like to take into account new dependencies
+ // if possible.
+ info.DepsLock.Lock()
+ if info.DepsCh != nil {
+ depsCh = info.DepsCh
+ info.DepsCh = nil
+ }
+ if info.DepsUpdateCh != nil {
+ depsUpdateCh = info.DepsUpdateCh
+ }
+ info.DepsLock.Unlock()
+
+ // If we still have no deps channel set, then we're done!
+ if depsCh == nil {
+ break
+ }
+ }
+
+ // If we passed dependencies, we just want to check once more that
+ // we're not cancelled, since this can happen just as dependencies pass.
+ select {
+ case <-info.CancelCh:
+ // Cancelled during an update while dependencies completed.
+ return
+ default:
+ }
+
+ // Run our callback or note that our upstream failed
+ var err error
+ if depsSuccess {
+ log.Printf("[DEBUG] dag/walk: walking %q", VertexName(v))
+ err = w.Callback(v)
+ } else {
+ log.Printf("[DEBUG] dag/walk: upstream errored, not walking %q", VertexName(v))
+ err = errWalkUpstream
+ }
+
+ // Record the error
+ if err != nil {
+ w.errLock.Lock()
+ defer w.errLock.Unlock()
+
+ if w.errMap == nil {
+ w.errMap = make(map[Vertex]error)
+ }
+ w.errMap[v] = err
+ }
+}
+
+func (w *Walker) waitDeps(
+ v Vertex,
+ deps map[Vertex]<-chan struct{},
+ doneCh chan<- bool,
+ cancelCh <-chan struct{}) {
+ // For each dependency given to us, wait for it to complete
+ for dep, depCh := range deps {
+ DepSatisfied:
+ for {
+ select {
+ case <-depCh:
+ // Dependency satisfied!
+ break DepSatisfied
+
+ case <-cancelCh:
+ // Wait cancelled. Note that we didn't satisfy dependencies
+ // so that anything waiting on us also doesn't run.
+ doneCh <- false
+ return
+
+ case <-time.After(time.Second * 5):
+ log.Printf("[DEBUG] dag/walk: vertex %q, waiting for: %q",
+ VertexName(v), VertexName(dep))
+ }
+ }
+ }
+
+ // Dependencies satisfied! We need to check if any errored
+ w.errLock.Lock()
+ defer w.errLock.Unlock()
+ for dep := range deps {
+ if w.errMap[dep] != nil {
+ // One of our dependencies failed, so return false
+ doneCh <- false
+ return
+ }
+ }
+
+ // All dependencies satisfied and successful
+ doneCh <- true
+}
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/expand.go b/vendor/github.com/hashicorp/terraform/flatmap/expand.go
new file mode 100644
index 00000000..2bfb3fe3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/flatmap/expand.go
@@ -0,0 +1,147 @@
+package flatmap
+
+import (
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/hil"
+)
+
+// Expand takes a map and a key (prefix) and expands that value into
+// a more complex structure. This is the reverse of the Flatten operation.
+func Expand(m map[string]string, key string) interface{} {
+ // If the key is exactly a key in the map, just return it
+ if v, ok := m[key]; ok {
+ if v == "true" {
+ return true
+ } else if v == "false" {
+ return false
+ }
+
+ return v
+ }
+
+ // Check if the key is an array, and if so, expand the array
+ if v, ok := m[key+".#"]; ok {
+ // If the count of the key is unknown, then just put the unknown
+ // value in the value itself. This will be detected by Terraform
+ // core later.
+ if v == hil.UnknownValue {
+ return v
+ }
+
+ return expandArray(m, key)
+ }
+
+ // Check if this is a prefix in the map
+ prefix := key + "."
+ for k := range m {
+ if strings.HasPrefix(k, prefix) {
+ return expandMap(m, prefix)
+ }
+ }
+
+ return nil
+}
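+
+// A sketch of expanding a flattened list (the map contents are illustrative):
+//
+//    m := map[string]string{"ports.#": "2", "ports.0": "80", "ports.1": "443"}
+//    Expand(m, "ports") // []interface{}{"80", "443"}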
+
+func expandArray(m map[string]string, prefix string) []interface{} {
+ num, err := strconv.ParseInt(m[prefix+".#"], 0, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ // If the number of elements in this array is 0, then return an
+ // empty slice as there is nothing to expand. Trying to expand it
+ // anyway could lead to crashes as any child maps, arrays or sets
+ // that no longer exist are still shown as empty with a count of 0.
+ if num == 0 {
+ return []interface{}{}
+ }
+
+ // The Schema "Set" type stores its values in an array format, but
+ // using numeric hash values instead of ordinal keys. Take the set
+ // of keys regardless of value, and expand them in numeric order.
+ // See GH-11042 for more details.
+ keySet := map[int]bool{}
+ computed := map[string]bool{}
+ for k := range m {
+ if !strings.HasPrefix(k, prefix+".") {
+ continue
+ }
+
+ key := k[len(prefix)+1:]
+ idx := strings.Index(key, ".")
+ if idx != -1 {
+ key = key[:idx]
+ }
+
+ // skip the count value
+ if key == "#" {
+ continue
+ }
+
+ // strip the computed flag if there is one
+ if strings.HasPrefix(key, "~") {
+ key = key[1:]
+ computed[key] = true
+ }
+
+ k, err := strconv.Atoi(key)
+ if err != nil {
+ panic(err)
+ }
+ keySet[int(k)] = true
+ }
+
+ keysList := make([]int, 0, num)
+ for key := range keySet {
+ keysList = append(keysList, key)
+ }
+ sort.Ints(keysList)
+
+ result := make([]interface{}, num)
+ for i, key := range keysList {
+ keyString := strconv.Itoa(key)
+ if computed[keyString] {
+ keyString = "~" + keyString
+ }
+ result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString))
+ }
+
+ return result
+}
+
+func expandMap(m map[string]string, prefix string) map[string]interface{} {
+ // Submaps may not have a '%' key, so we can't count on this value being
+ // here. If we don't have a count, just proceed as if we have a map.
+ if count, ok := m[prefix+"%"]; ok && count == "0" {
+ return map[string]interface{}{}
+ }
+
+ result := make(map[string]interface{})
+ for k := range m {
+ if !strings.HasPrefix(k, prefix) {
+ continue
+ }
+
+ key := k[len(prefix):]
+ idx := strings.Index(key, ".")
+ if idx != -1 {
+ key = key[:idx]
+ }
+ if _, ok := result[key]; ok {
+ continue
+ }
+
+ // skip the map count value
+ if key == "%" {
+ continue
+ }
+
+ result[key] = Expand(m, k[:len(prefix)+len(key)])
+ }
+
+ return result
+}
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/flatten.go b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go
new file mode 100644
index 00000000..9ff6e426
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go
@@ -0,0 +1,71 @@
+package flatmap
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Flatten takes a structure and turns it into a flat map[string]string.
+//
+// Within the "thing" parameter, only primitive values are allowed. Structs are
+// not supported. Therefore, it can only be slices, maps, primitives, and
+// any combination of those together.
+//
+// See the tests for examples of what inputs are turned into.
+func Flatten(thing map[string]interface{}) Map {
+ result := make(map[string]string)
+
+ for k, raw := range thing {
+ flatten(result, k, reflect.ValueOf(raw))
+ }
+
+ return Map(result)
+}
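+
+// A sketch of the output shape (input values are illustrative):
+//
+//    Flatten(map[string]interface{}{
+//        "port": 80,
+//        "tags": []interface{}{"a", "b"},
+//    })
+//    // => Map{"port": "80", "tags.#": "2", "tags.0": "a", "tags.1": "b"}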
+
+func flatten(result map[string]string, prefix string, v reflect.Value) {
+ if v.Kind() == reflect.Interface {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Bool:
+ if v.Bool() {
+ result[prefix] = "true"
+ } else {
+ result[prefix] = "false"
+ }
+ case reflect.Int:
+ result[prefix] = fmt.Sprintf("%d", v.Int())
+ case reflect.Map:
+ flattenMap(result, prefix, v)
+ case reflect.Slice:
+ flattenSlice(result, prefix, v)
+ case reflect.String:
+ result[prefix] = v.String()
+ default:
+ panic(fmt.Sprintf("Unknown: %s", v))
+ }
+}
+
+func flattenMap(result map[string]string, prefix string, v reflect.Value) {
+ for _, k := range v.MapKeys() {
+ if k.Kind() == reflect.Interface {
+ k = k.Elem()
+ }
+
+ if k.Kind() != reflect.String {
+ panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k))
+ }
+
+ flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k))
+ }
+}
+
+func flattenSlice(result map[string]string, prefix string, v reflect.Value) {
+ prefix = prefix + "."
+
+ result[prefix+"#"] = fmt.Sprintf("%d", v.Len())
+ for i := 0; i < v.Len(); i++ {
+ flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i))
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/map.go b/vendor/github.com/hashicorp/terraform/flatmap/map.go
new file mode 100644
index 00000000..46b72c40
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/flatmap/map.go
@@ -0,0 +1,82 @@
+package flatmap
+
+import (
+ "strings"
+)
+
+// Map is a wrapper around map[string]string that provides some helpers
+// above it that assume the map is in the format that flatmap expects
+// (the result of Flatten).
+//
+// All modifying functions such as Delete are done in-place unless
+// otherwise noted.
+type Map map[string]string
+
+// Contains returns true if the map contains the given key.
+func (m Map) Contains(key string) bool {
+ for _, k := range m.Keys() {
+ if k == key {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Delete deletes a key out of the map with the given prefix.
+func (m Map) Delete(prefix string) {
+ for k := range m {
+ match := k == prefix
+ if !match {
+ if !strings.HasPrefix(k, prefix) {
+ continue
+ }
+
+ if k[len(prefix):len(prefix)+1] != "." {
+ continue
+ }
+ }
+
+ delete(m, k)
+ }
+}
+
+// Keys returns all of the top-level keys in this map
+func (m Map) Keys() []string {
+ ks := make(map[string]struct{})
+ for k := range m {
+ idx := strings.Index(k, ".")
+ if idx == -1 {
+ idx = len(k)
+ }
+
+ ks[k[:idx]] = struct{}{}
+ }
+
+ result := make([]string, 0, len(ks))
+ for k := range ks {
+ result = append(result, k)
+ }
+
+ return result
+}
+
+// Merge merges the contents of the other Map into this one.
+//
+// This merge is smarter than a simple map iteration because it
+// will fully replace arrays and other complex structures that
+// are present in this map with the other map's. For example, if
+// this map has a 3 element "foo" list, and m2 has a 2 element "foo"
+// list, then the result will be that m has a 2 element "foo"
+// list.
+func (m Map) Merge(m2 Map) {
+ for _, prefix := range m2.Keys() {
+ m.Delete(prefix)
+
+ for k, v := range m2 {
+ if strings.HasPrefix(k, prefix) {
+ m[k] = v
+ }
+ }
+ }
+}
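+
+// A sketch of the replacement semantics (the map contents are illustrative):
+//
+//    m := Map{"foo.#": "2", "foo.0": "a", "foo.1": "b"}
+//    m.Merge(Map{"foo.#": "1", "foo.0": "c"})
+//    // m is now Map{"foo.#": "1", "foo.0": "c"}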
diff --git a/vendor/github.com/hashicorp/terraform/helper/README.md b/vendor/github.com/hashicorp/terraform/helper/README.md
new file mode 100644
index 00000000..d0fee068
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/README.md
@@ -0,0 +1,7 @@
+# Helper Libraries
+
+This folder contains helper libraries for Terraform plugins. A running
+joke is that this is the "Terraform standard library" for plugins. The goal
+of the packages in this directory is to provide high-level helpers to
+make it easier to implement the various aspects of writing a plugin for
+Terraform.
diff --git a/vendor/github.com/hashicorp/terraform/helper/config/decode.go b/vendor/github.com/hashicorp/terraform/helper/config/decode.go
new file mode 100644
index 00000000..f470c9b4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/config/decode.go
@@ -0,0 +1,28 @@
+package config
+
+import (
+ "github.com/mitchellh/mapstructure"
+)
+
+func Decode(target interface{}, raws ...interface{}) (*mapstructure.Metadata, error) {
+ var md mapstructure.Metadata
+ decoderConfig := &mapstructure.DecoderConfig{
+ Metadata: &md,
+ Result: target,
+ WeaklyTypedInput: true,
+ }
+
+ decoder, err := mapstructure.NewDecoder(decoderConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, raw := range raws {
+ err := decoder.Decode(raw)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &md, nil
+}
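+
+// A sketch of layering several raw configuration maps into one target;
+// the variable names are illustrative:
+//
+//    var cfg struct{ Region string }
+//    md, err := Decode(&cfg, rawDefaults, rawOverrides)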
diff --git a/vendor/github.com/hashicorp/terraform/helper/config/validator.go b/vendor/github.com/hashicorp/terraform/helper/config/validator.go
new file mode 100644
index 00000000..1a6e023b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/config/validator.go
@@ -0,0 +1,214 @@
+package config
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform/flatmap"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Validator is a helper that helps you validate the configuration
+// of your resource, resource provider, etc.
+//
+// At the most basic level, set the Required and Optional lists to be
+// specifiers of keys that are required or optional. If a key shows up
+// that isn't in one of these two lists, then an error is generated.
+//
+// The "specifiers" allowed in this is a fairly rich syntax to help
+// describe the format of your configuration:
+//
+// * Basic keys are just strings. For example: "foo" will match the
+// "foo" key.
+//
+// * Nested structure keys can be matched by doing
+// "listener.*.foo". This will verify that there is at least one
+// listener element that has the "foo" key set.
+//
+// * The existence of a nested structure can be checked by simply
+// doing "listener.*" which will verify that there is at least
+// one element in the "listener" structure. This is NOT
+// validating that "listener" is an array. It is validating
+// that it is a nested structure in the configuration.
+//
+type Validator struct {
+ Required []string
+ Optional []string
+}
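+
+// A sketch of a validator for a hypothetical resource; the key names and
+// the resourceConfig value (a *terraform.ResourceConfig) are illustrative:
+//
+//    v := &Validator{
+//        Required: []string{"name", "listener.*.port"},
+//        Optional: []string{"description"},
+//    }
+//    ws, es := v.Validate(resourceConfig)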
+
+func (v *Validator) Validate(
+ c *terraform.ResourceConfig) (ws []string, es []error) {
+ // Flatten the configuration so it is easier to reason about
+ flat := flatmap.Flatten(c.Raw)
+
+ keySet := make(map[string]validatorKey)
+ for i, vs := range [][]string{v.Required, v.Optional} {
+ req := i == 0
+ for _, k := range vs {
+ vk, err := newValidatorKey(k, req)
+ if err != nil {
+ es = append(es, err)
+ continue
+ }
+
+ keySet[k] = vk
+ }
+ }
+
+ purged := make([]string, 0)
+ for _, kv := range keySet {
+ p, w, e := kv.Validate(flat)
+ if len(w) > 0 {
+ ws = append(ws, w...)
+ }
+ if len(e) > 0 {
+ es = append(es, e...)
+ }
+
+ purged = append(purged, p...)
+ }
+
+ // Delete all the keys we processed in order to find
+ // the unknown keys.
+ for _, p := range purged {
+ delete(flat, p)
+ }
+
+ // The rest are unknown
+ for k := range flat {
+ es = append(es, fmt.Errorf("Unknown configuration: %s", k))
+ }
+
+ return
+}
+
+type validatorKey interface {
+ // Validate validates the given configuration and returns viewed keys,
+ // warnings, and errors.
+ Validate(map[string]string) ([]string, []string, []error)
+}
+
+func newValidatorKey(k string, req bool) (validatorKey, error) {
+ var result validatorKey
+
+ parts := strings.Split(k, ".")
+ if len(parts) > 1 && parts[1] == "*" {
+ result = &nestedValidatorKey{
+ Parts: parts,
+ Required: req,
+ }
+ } else {
+ result = &basicValidatorKey{
+ Key: k,
+ Required: req,
+ }
+ }
+
+ return result, nil
+}
+
+// basicValidatorKey validates keys that are basic such as "foo"
+type basicValidatorKey struct {
+ Key string
+ Required bool
+}
+
+func (v *basicValidatorKey) Validate(
+ m map[string]string) ([]string, []string, []error) {
+ for k := range m {
+ // If we have the exact key, it's a match
+ if k == v.Key {
+ return []string{k}, nil, nil
+ }
+ }
+
+ if !v.Required {
+ return nil, nil, nil
+ }
+
+ return nil, nil, []error{fmt.Errorf(
+ "Key not found: %s", v.Key)}
+}
+
+type nestedValidatorKey struct {
+ Parts []string
+ Required bool
+}
+
+func (v *nestedValidatorKey) validate(
+ m map[string]string,
+ prefix string,
+ offset int) ([]string, []string, []error) {
+ if offset >= len(v.Parts) {
+ // We're at the end. Look for a specific key.
+ v2 := &basicValidatorKey{Key: prefix, Required: v.Required}
+ return v2.Validate(m)
+ }
+
+ current := v.Parts[offset]
+
+ // If we're at offset 0, special case to start at the next one.
+ if offset == 0 {
+ return v.validate(m, current, offset+1)
+ }
+
+ // Determine if we're doing a "for all" or a specific key
+ if current != "*" {
+ // We're looking at a specific key, continue on.
+ return v.validate(m, prefix+"."+current, offset+1)
+ }
+
+ // We're doing a "for all", so we loop over.
+ countStr, ok := m[prefix+".#"]
+ if !ok {
+ if !v.Required {
+ // It wasn't required, so it's no problem.
+ return nil, nil, nil
+ }
+
+ return nil, nil, []error{fmt.Errorf(
+ "Key not found: %s", prefix)}
+ }
+
+ count, err := strconv.ParseInt(countStr, 0, 0)
+ if err != nil {
+ // This shouldn't happen if flatmap works properly
+ panic("invalid flatmap array")
+ }
+
+ var e []error
+ var w []string
+ u := make([]string, 1, count+1)
+ u[0] = prefix + ".#"
+ for i := 0; i < int(count); i++ {
+ prefix := fmt.Sprintf("%s.%d", prefix, i)
+
+ // Mark that we saw this specific key
+ u = append(u, prefix)
+
+ // Mark all prefixes of this
+ for k := range m {
+ if !strings.HasPrefix(k, prefix+".") {
+ continue
+ }
+ u = append(u, k)
+ }
+
+ // If we have more parts, then validate deeper
+ if offset+1 < len(v.Parts) {
+ u2, w2, e2 := v.validate(m, prefix, offset+1)
+
+ u = append(u, u2...)
+ w = append(w, w2...)
+ e = append(e, e2...)
+ }
+ }
+
+ return u, w, e
+}
+
+func (v *nestedValidatorKey) Validate(
+ m map[string]string) ([]string, []string, []error) {
+ return v.validate(m, "", 0)
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go b/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go
new file mode 100644
index 00000000..18b8837c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go
@@ -0,0 +1,154 @@
+// Package experiment contains helper functions for tracking experimental
+// features throughout Terraform.
+//
+// This package should be used for creating, enabling, querying, and deleting
+// experimental features. By unifying all of that onto a single interface,
+// we can have the Go compiler help us by enforcing every place we touch
+// an experimental feature.
+//
+// To create a new experiment:
+//
+// 1. Add the experiment to the global vars list below, prefixed with X_
+//
+// 2. Add the experiment variable to the All list in the init() function
+//
+// 3. Use it!
+//
+// To remove an experiment:
+//
+// 1. Delete the experiment global var.
+//
+// 2. Try to compile and fix all the places where the var was referenced.
+//
+// To use an experiment:
+//
+// 1. Use Flag() if you want the experiment to be available from the CLI.
+//
+// 2. Use Enabled() to check whether it is enabled.
+//
+// As a general user, an experiment can be enabled via:
+//
+// 1. The `-Xexperiment-name` flag
+// 2. The `TF_X_<experiment-name>` env var.
+// 3. The `TF_X_FORCE` env var can be set to force an experimental feature
+// without human verification.
+//
+package experiment
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// The experiments that are available are listed below. Any package in
+// Terraform defining an experiment should define the experiments below.
+// By keeping them all within the experiment package we force a single point
+// of definition and use. This allows the compiler to enforce references
+// so it becomes easy to remove the features.
+var (
+ // Shadow graph. This is already on by default. Disabling it will be
+ // allowed for a while in order for it to not block operations.
+ X_shadow = newBasicID("shadow", "SHADOW", false)
+)
+
+// Global variables this package uses because we are a package
+// with global state.
+var (
+ // All is the list of all experiments. Do not modify this.
+ All []ID
+
+ // enabled keeps track of what flags have been enabled
+ enabled map[string]bool
+ enabledLock sync.Mutex
+
+ // Hidden "experiment" that forces all others to be on without verification
+ x_force = newBasicID("force", "FORCE", false)
+)
+
+func init() {
+ // The list of all experiments, update this when an experiment is added.
+ All = []ID{
+ X_shadow,
+ x_force,
+ }
+
+ // Load
+ reload()
+}
+
+// reload is used by tests to reload the global state. It is also called
+// by init to load the initial state.
+func reload() {
+ // Initialize
+ enabledLock.Lock()
+ enabled = make(map[string]bool)
+ enabledLock.Unlock()
+
+ // Set defaults and check env vars
+ for _, id := range All {
+ // Get the default value
+ def := id.Default()
+
+ // If the env var is set, it overrides the default; any value other than "0" enables.
+ key := fmt.Sprintf("TF_X_%s", strings.ToUpper(id.Env()))
+ if v := os.Getenv(key); v != "" {
+ def = v != "0"
+ }
+
+ // Set the default
+ SetEnabled(id, def)
+ }
+}
+
+// Enabled returns whether an experiment has been enabled or not.
+func Enabled(id ID) bool {
+ enabledLock.Lock()
+ defer enabledLock.Unlock()
+ return enabled[id.Flag()]
+}
+
+// SetEnabled sets an experiment to enabled/disabled. Please check with
+// the experiment docs for when calling this actually affects the experiment.
+func SetEnabled(id ID, v bool) {
+ enabledLock.Lock()
+ defer enabledLock.Unlock()
+ enabled[id.Flag()] = v
+}
+
+// Force returns true if the -Xforce or TF_X_FORCE flag is present, which
+// advises users of this package to not verify with the user that they want
+// experimental behavior and to just continue with it.
+func Force() bool {
+ return Enabled(x_force)
+}
+
+// Flag configures the given FlagSet with the flags to configure
+// all active experiments.
+func Flag(fs *flag.FlagSet) {
+ for _, id := range All {
+ desc := id.Flag()
+ key := fmt.Sprintf("X%s", id.Flag())
+ fs.Var(&idValue{X: id}, key, desc)
+ }
+}
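+
+// A sketch of wiring experiments into a CLI; the FlagSet name and argv are
+// illustrative:
+//
+//    fs := flag.NewFlagSet("terraform", flag.ContinueOnError)
+//    Flag(fs)
+//    fs.Parse([]string{"-Xshadow=false"})
+//    Enabled(X_shadow) // false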
+
+// idValue implements flag.Value for setting the enabled/disabled state
+// of an experiment from the CLI.
+type idValue struct {
+ X ID
+}
+
+func (v *idValue) IsBoolFlag() bool { return true }
+func (v *idValue) String() string { return strconv.FormatBool(Enabled(v.X)) }
+func (v *idValue) Set(raw string) error {
+ b, err := strconv.ParseBool(raw)
+ if err == nil {
+ SetEnabled(v.X, b)
+ }
+
+ return err
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/id.go b/vendor/github.com/hashicorp/terraform/helper/experiment/id.go
new file mode 100644
index 00000000..8e2f7073
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/experiment/id.go
@@ -0,0 +1,34 @@
+package experiment
+
+// ID represents an experimental feature.
+//
+// The global vars defined on this package should be used as ID values.
+// This interface is purposely not implementable outside of this package
+// so that we can rely on the Go compiler to enforce all experiment references.
+type ID interface {
+ Env() string
+ Flag() string
+ Default() bool
+
+ unexported() // So the ID can't be implemented externally.
+}
+
+// basicID implements ID.
+type basicID struct {
+ EnvValue string
+ FlagValue string
+ DefaultValue bool
+}
+
+func newBasicID(flag, env string, def bool) ID {
+ return &basicID{
+ EnvValue: env,
+ FlagValue: flag,
+ DefaultValue: def,
+ }
+}
+
+func (id *basicID) Env() string { return id.EnvValue }
+func (id *basicID) Flag() string { return id.FlagValue }
+func (id *basicID) Default() bool { return id.DefaultValue }
+func (id *basicID) unexported() {}
diff --git a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
new file mode 100644
index 00000000..64d8263e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
@@ -0,0 +1,22 @@
+package hashcode
+
+import (
+ "hash/crc32"
+)
+
+// String hashes a string to a non-negative int hashcode.
+//
+// crc32 returns a uint32, but for our use we need a non-negative integer.
+// Here we cast to an int and negate it if the result is negative.
+func String(s string) int {
+ v := int(crc32.ChecksumIEEE([]byte(s)))
+ if v >= 0 {
+ return v
+ }
+ if -v >= 0 {
+ return -v
+ }
+ // v == MinInt
+ return 0
+}
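+
+// exampleSetHash is an editor's sketch, not part of the original source:
+// providers typically use String to derive a stable element hash, e.g. for
+// the members of a schema.Set, from a normalized string form of the element.
+func exampleSetHash(cidr string) int {
+	return String(cidr) // String("10.0.0.0/24") always yields the same value
+}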
diff --git a/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go
new file mode 100644
index 00000000..67be1df1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go
@@ -0,0 +1,41 @@
+package hilmapstructure
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/mitchellh/mapstructure"
+)
+
+var hilMapstructureDecodeHookEmptySlice []interface{}
+var hilMapstructureDecodeHookStringSlice []string
+var hilMapstructureDecodeHookEmptyMap map[string]interface{}
+
+// WeakDecode behaves in the same way as mapstructure.WeakDecode but has a
+// DecodeHook which defeats the backward compatibility mode of mapstructure
+// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This
+// allows us to use WeakDecode (desirable), but not fail on empty lists.
+func WeakDecode(m interface{}, rawVal interface{}) error {
+ config := &mapstructure.DecoderConfig{
+ DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) {
+ sliceType := reflect.TypeOf(hilMapstructureDecodeHookEmptySlice)
+ stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice)
+ mapType := reflect.TypeOf(hilMapstructureDecodeHookEmptyMap)
+
+ if (source == sliceType || source == stringSliceType) && target == mapType {
+ return nil, fmt.Errorf("Cannot convert a []interface{} into a map[string]interface{}")
+ }
+
+ return val, nil
+ },
+ WeaklyTypedInput: true,
+ Result: rawVal,
+ }
+
+ decoder, err := mapstructure.NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(m)
+}
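+
+// exampleWeakDecode is an editor's sketch, not part of the original source:
+// WeaklyTypedInput lets the string "80" decode into an int field, while the
+// custom DecodeHook keeps an empty list from silently becoming an empty map.
+func exampleWeakDecode() (int, error) {
+	var target struct {
+		Port int
+	}
+	err := WeakDecode(map[string]interface{}{"port": "80"}, &target)
+	return target.Port, err // 80, nil
+}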
diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
new file mode 100644
index 00000000..433cd77d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
@@ -0,0 +1,100 @@
+package logging
+
+import (
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+ "syscall"
+
+ "github.com/hashicorp/logutils"
+)
+
+// These are the environment variables that determine if we log, and, if
+// we log, whether or not the log should go to a file.
+const (
+ EnvLog = "TF_LOG" // Set to a log level; any other non-empty value means TRACE
+ EnvLogFile = "TF_LOG_PATH" // Set to a file
+)
+
+var validLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"}
+
+// LogOutput determines where we should send logs (if anywhere) and the log level.
+func LogOutput() (logOutput io.Writer, err error) {
+ logOutput = ioutil.Discard
+
+ logLevel := LogLevel()
+ if logLevel == "" {
+ return
+ }
+
+ logOutput = os.Stderr
+ if logPath := os.Getenv(EnvLogFile); logPath != "" {
+ var err error
+ logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // This was the default since the beginning
+ logOutput = &logutils.LevelFilter{
+ Levels: validLevels,
+ MinLevel: logutils.LogLevel(logLevel),
+ Writer: logOutput,
+ }
+
+ return
+}
+
+// SetOutput checks for a log destination with LogOutput, and calls
+// log.SetOutput with the result. If LogOutput returns nil, SetOutput uses
+// ioutil.Discard. Any error from LogOutput is fatal.
+func SetOutput() {
+ out, err := LogOutput()
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ if out == nil {
+ out = ioutil.Discard
+ }
+
+ log.SetOutput(out)
+}
+
+// LogLevel returns the current log level string based on the environment variables
+func LogLevel() string {
+ envLevel := os.Getenv(EnvLog)
+ if envLevel == "" {
+ return ""
+ }
+
+ logLevel := "TRACE"
+ if isValidLogLevel(envLevel) {
+ // accept any casing for better UX: info, Info, or INFO
+ logLevel = strings.ToUpper(envLevel)
+ } else {
+ log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. Valid levels are: %+v",
+ envLevel, validLevels)
+ }
+
+ return logLevel
+}
+
+// IsDebugOrHigher returns whether or not the current log level is debug or trace
+func IsDebugOrHigher() bool {
+ level := LogLevel()
+ return level == "DEBUG" || level == "TRACE"
+}
+
+func isValidLogLevel(level string) bool {
+ for _, l := range validLevels {
+ if strings.ToUpper(level) == string(l) {
+ return true
+ }
+ }
+
+ return false
+}
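+
+// exampleEnableLogging is an editor's sketch, not part of the original
+// source; the log path is hypothetical. Setting the env vars before calling
+// SetOutput sends DEBUG-and-higher messages to the given file.
+func exampleEnableLogging() {
+	os.Setenv(EnvLog, "DEBUG")
+	os.Setenv(EnvLogFile, "/tmp/terraform.log")
+	SetOutput()
+}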
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/error.go b/vendor/github.com/hashicorp/terraform/helper/resource/error.go
new file mode 100644
index 00000000..7ee21614
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/error.go
@@ -0,0 +1,79 @@
+package resource
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+type NotFoundError struct {
+ LastError error
+ LastRequest interface{}
+ LastResponse interface{}
+ Message string
+ Retries int
+}
+
+func (e *NotFoundError) Error() string {
+ if e.Message != "" {
+ return e.Message
+ }
+
+ if e.Retries > 0 {
+ return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries)
+ }
+
+ return "couldn't find resource"
+}
+
+// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending
+type UnexpectedStateError struct {
+ LastError error
+ State string
+ ExpectedState []string
+}
+
+func (e *UnexpectedStateError) Error() string {
+ return fmt.Sprintf(
+ "unexpected state '%s', wanted target '%s'. last error: %s",
+ e.State,
+ strings.Join(e.ExpectedState, ", "),
+ e.LastError,
+ )
+}
+
+// TimeoutError is returned when WaitForState times out
+type TimeoutError struct {
+ LastError error
+ LastState string
+ Timeout time.Duration
+ ExpectedState []string
+}
+
+func (e *TimeoutError) Error() string {
+ expectedState := "resource to be gone"
+ if len(e.ExpectedState) > 0 {
+ expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", "))
+ }
+
+ extraInfo := make([]string, 0)
+ if e.LastState != "" {
+ extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState))
+ }
+ if e.Timeout > 0 {
+ extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String()))
+ }
+
+ suffix := ""
+ if len(extraInfo) > 0 {
+ suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", "))
+ }
+
+ if e.LastError != nil {
+ return fmt.Sprintf("timeout while waiting for %s%s: %s",
+ expectedState, suffix, e.LastError)
+ }
+
+ return fmt.Sprintf("timeout while waiting for %s%s",
+ expectedState, suffix)
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/id.go b/vendor/github.com/hashicorp/terraform/helper/resource/id.go
new file mode 100644
index 00000000..629582b3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/id.go
@@ -0,0 +1,39 @@
+package resource
+
+import (
+ "crypto/rand"
+ "fmt"
+ "math/big"
+ "sync"
+)
+
+const UniqueIdPrefix = `terraform-`
+
+// idCounter is a randomly seeded monotonic counter for generating ordered
+// unique ids. It uses a big.Int so we can easily increment a long numeric
+// string. The max possible hex value here with 12 random bytes is
+// "01000000000000000000000000", so there's no chance of rollover during
+// operation.
+var idMutex sync.Mutex
+var idCounter = big.NewInt(0).SetBytes(randomBytes(12))
+
+// UniqueId is a helper for a resource to generate a unique identifier with
+// the default prefix.
+func UniqueId() string {
+ return PrefixedUniqueId(UniqueIdPrefix)
+}
+
+// PrefixedUniqueId is a helper for a resource to generate a unique
+// identifier with the given prefix.
+//
+// After the prefix, the ID consists of an incrementing 26-digit hex value
+// (padded to match the length of the previous timestamp output).
+func PrefixedUniqueId(prefix string) string {
+ idMutex.Lock()
+ defer idMutex.Unlock()
+ return fmt.Sprintf("%s%026x", prefix, idCounter.Add(idCounter, big.NewInt(1)))
+}
+
+func randomBytes(n int) []byte {
+ b := make([]byte, n)
+ rand.Read(b)
+ return b
+}
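+
+// exampleIds is an editor's sketch, not part of the original source:
+// successive calls return ordered, unique ids such as "terraform-<26 hex
+// digits>" and "user-<26 hex digits>" (the prefixes shown are illustrative).
+func exampleIds() (string, string) {
+	return UniqueId(), PrefixedUniqueId("user-")
+}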
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/map.go b/vendor/github.com/hashicorp/terraform/helper/resource/map.go
new file mode 100644
index 00000000..a465136f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/map.go
@@ -0,0 +1,140 @@
+package resource
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Map is a map of resources that are supported, and provides helpers for
+// more easily implementing a ResourceProvider.
+type Map struct {
+ Mapping map[string]Resource
+}
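+
+// exampleMap is an editor's sketch, not part of the original source,
+// showing how a provider might register a resource type; the type name and
+// the create/destroy funcs are hypothetical.
+func exampleMap(create CreateFunc, destroy DestroyFunc) *Map {
+	return &Map{
+		Mapping: map[string]Resource{
+			"example_instance": {
+				Create:  create,
+				Destroy: destroy,
+			},
+		},
+	}
+}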
+
+func (m *Map) Validate(
+ t string, c *terraform.ResourceConfig) ([]string, []error) {
+ r, ok := m.Mapping[t]
+ if !ok {
+ return nil, []error{fmt.Errorf("Unknown resource type: %s", t)}
+ }
+
+ // If there is no validator set, then it is valid
+ if r.ConfigValidator == nil {
+ return nil, nil
+ }
+
+ return r.ConfigValidator.Validate(c)
+}
+
+// Apply performs a create or update depending on the diff, and calls
+// the proper function on the matching Resource.
+func (m *Map) Apply(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ d *terraform.InstanceDiff,
+ meta interface{}) (*terraform.InstanceState, error) {
+ r, ok := m.Mapping[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
+ }
+
+ if d.Destroy || d.RequiresNew() {
+ if s.ID != "" {
+ // Destroy the resource if it is created
+ err := r.Destroy(s, meta)
+ if err != nil {
+ return s, err
+ }
+
+ s.ID = ""
+ }
+
+ // If we're only destroying, and not creating, then return now.
+ // Otherwise, we continue so that we can create a new resource.
+ if !d.RequiresNew() {
+ return nil, nil
+ }
+ }
+
+ var result *terraform.InstanceState
+ var err error
+ if s.ID == "" {
+ result, err = r.Create(s, d, meta)
+ } else {
+ if r.Update == nil {
+ return s, fmt.Errorf(
+ "Resource type '%s' doesn't support update",
+ info.Type)
+ }
+
+ result, err = r.Update(s, d, meta)
+ }
+ if result != nil {
+ if result.Attributes == nil {
+ result.Attributes = make(map[string]string)
+ }
+
+ result.Attributes["id"] = result.ID
+ }
+
+ return result, err
+}
+
+// Diff performs a diff on the proper resource type.
+func (m *Map) Diff(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig,
+ meta interface{}) (*terraform.InstanceDiff, error) {
+ r, ok := m.Mapping[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
+ }
+
+ return r.Diff(s, c, meta)
+}
+
+// Refresh performs a Refresh on the proper resource type.
+//
+// Refresh on the Resource won't be called if the state represents a
+// non-created resource (ID is blank).
+//
+// An error is returned if the resource isn't registered.
+func (m *Map) Refresh(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ meta interface{}) (*terraform.InstanceState, error) {
+ // If the resource isn't created, don't refresh.
+ if s.ID == "" {
+ return s, nil
+ }
+
+ r, ok := m.Mapping[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
+ }
+
+ return r.Refresh(s, meta)
+}
+
+// Resources returns all the resources that are supported by this
+// resource map and can be used to satisfy the Resources method of
+// a ResourceProvider.
+func (m *Map) Resources() []terraform.ResourceType {
+ ks := make([]string, 0, len(m.Mapping))
+ for k := range m.Mapping {
+ ks = append(ks, k)
+ }
+ sort.Strings(ks)
+
+ rs := make([]terraform.ResourceType, 0, len(m.Mapping))
+ for _, k := range ks {
+ rs = append(rs, terraform.ResourceType{
+ Name: k,
+ })
+ }
+
+ return rs
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/resource.go b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go
new file mode 100644
index 00000000..0d9c831a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go
@@ -0,0 +1,49 @@
+package resource
+
+import (
+ "github.com/hashicorp/terraform/helper/config"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+type Resource struct {
+ ConfigValidator *config.Validator
+ Create CreateFunc
+ Destroy DestroyFunc
+ Diff DiffFunc
+ Refresh RefreshFunc
+ Update UpdateFunc
+}
+
+// CreateFunc is a function that creates a resource that didn't previously
+// exist.
+type CreateFunc func(
+ *terraform.InstanceState,
+ *terraform.InstanceDiff,
+ interface{}) (*terraform.InstanceState, error)
+
+// DestroyFunc is a function that destroys a resource that previously
+// exists using the state.
+type DestroyFunc func(
+ *terraform.InstanceState,
+ interface{}) error
+
+// DiffFunc is a function that performs a diff of a resource.
+type DiffFunc func(
+ *terraform.InstanceState,
+ *terraform.ResourceConfig,
+ interface{}) (*terraform.InstanceDiff, error)
+
+// RefreshFunc is a function that performs a refresh of a specific type
+// of resource.
+type RefreshFunc func(
+ *terraform.InstanceState,
+ interface{}) (*terraform.InstanceState, error)
+
+// UpdateFunc is a function that is called to update a resource that
+// previously existed. The difference between this and CreateFunc is that
+// the diff is guaranteed to only contain attributes that don't require
+// a new resource.
+type UpdateFunc func(
+ *terraform.InstanceState,
+ *terraform.InstanceDiff,
+ interface{}) (*terraform.InstanceState, error)
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
new file mode 100644
index 00000000..37c586a1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
@@ -0,0 +1,259 @@
+package resource
+
+import (
+ "log"
+ "time"
+)
+
+var refreshGracePeriod = 30 * time.Second
+
+// StateRefreshFunc is a function type used for StateChangeConf that is
+// responsible for refreshing the item being watched for a state change.
+//
+// It returns three results. `result` is any object that will be returned
+// as the final object after waiting for state change. This allows you to
+// return the final updated object, for example an EC2 instance after refreshing
+// it.
+//
+// `state` is the latest state of that object. And `err` is any error that
+// may have happened while refreshing the state.
+type StateRefreshFunc func() (result interface{}, state string, err error)
+
+// StateChangeConf is the configuration struct used for `WaitForState`.
+type StateChangeConf struct {
+ Delay time.Duration // Wait this time before starting checks
+ Pending []string // States that are "allowed" and will continue trying
+ Refresh StateRefreshFunc // Refreshes the current state
+ Target []string // Target state
+ Timeout time.Duration // The amount of time to wait before timeout
+ MinTimeout time.Duration // Smallest time to wait before refreshes
+ PollInterval time.Duration // Override MinTimeout/backoff and only poll this often
+ NotFoundChecks int // Number of times to allow not found
+
+ // This is to work around inconsistent APIs
+ ContinuousTargetOccurence int // Number of times the Target state has to occur continuously
+}
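+
+// exampleWait is an editor's sketch, not part of the original source: a
+// typical provider waits for an API object to leave its pending states and
+// reach a target state. The refresh func is supplied by the caller.
+func exampleWait(refresh StateRefreshFunc) (interface{}, error) {
+	conf := &StateChangeConf{
+		Pending:    []string{"pending", "creating"},
+		Target:     []string{"running"},
+		Refresh:    refresh,
+		Timeout:    10 * time.Minute,
+		MinTimeout: 3 * time.Second,
+	}
+	return conf.WaitForState()
+}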
+
+// WaitForState watches an object and waits for it to achieve the state
+// specified in the configuration using the specified Refresh() func,
+// waiting the number of seconds specified in the timeout configuration.
+//
+// If the Refresh function returns an error, exit immediately with that error.
+//
+// If the Refresh function returns a state other than the Target state or one
+// listed in Pending, return immediately with an error.
+//
+// If the Timeout is exceeded before reaching the Target state, return an
+// error.
+//
+// Otherwise, return the result of the first call to the Refresh function
+// that reaches the target state.
+func (conf *StateChangeConf) WaitForState() (interface{}, error) {
+ log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target)
+
+ notfoundTick := 0
+ targetOccurence := 0
+
+ // Set a default for times to check for not found
+ if conf.NotFoundChecks == 0 {
+ conf.NotFoundChecks = 20
+ }
+
+ if conf.ContinuousTargetOccurence == 0 {
+ conf.ContinuousTargetOccurence = 1
+ }
+
+ type Result struct {
+ Result interface{}
+ State string
+ Error error
+ Done bool
+ }
+
+ // Read every result from the refresh loop, waiting for a positive result.Done.
+ resCh := make(chan Result, 1)
+ // cancellation channel for the refresh loop
+ cancelCh := make(chan struct{})
+
+ result := Result{}
+
+ go func() {
+ defer close(resCh)
+
+ time.Sleep(conf.Delay)
+
+ // start with 0 delay for the first loop
+ var wait time.Duration
+
+ for {
+ // store the last result
+ resCh <- result
+
+ // wait and watch for cancellation
+ select {
+ case <-cancelCh:
+ return
+ case <-time.After(wait):
+ // first round had no wait
+ if wait == 0 {
+ wait = 100 * time.Millisecond
+ }
+ }
+
+ res, currentState, err := conf.Refresh()
+ result = Result{
+ Result: res,
+ State: currentState,
+ Error: err,
+ }
+
+ if err != nil {
+ resCh <- result
+ return
+ }
+
+ // If we're waiting for the absence of a thing, then return
+ if res == nil && len(conf.Target) == 0 {
+ targetOccurence++
+ if conf.ContinuousTargetOccurence == targetOccurence {
+ result.Done = true
+ resCh <- result
+ return
+ }
+ continue
+ }
+
+ if res == nil {
+ // If we didn't find the resource, check whether it has been
+ // missing for a while and, if so, report an error.
+ notfoundTick++
+ if notfoundTick > conf.NotFoundChecks {
+ result.Error = &NotFoundError{
+ LastError: err,
+ Retries: notfoundTick,
+ }
+ resCh <- result
+ return
+ }
+ } else {
+ // Reset the counter for when a resource isn't found
+ notfoundTick = 0
+ found := false
+
+ for _, allowed := range conf.Target {
+ if currentState == allowed {
+ found = true
+ targetOccurence++
+ if conf.ContinuousTargetOccurence == targetOccurence {
+ result.Done = true
+ resCh <- result
+ return
+ }
+ continue
+ }
+ }
+
+ for _, allowed := range conf.Pending {
+ if currentState == allowed {
+ found = true
+ targetOccurence = 0
+ break
+ }
+ }
+
+ if !found && len(conf.Pending) > 0 {
+ result.Error = &UnexpectedStateError{
+ LastError: err,
+ State: result.State,
+ ExpectedState: conf.Target,
+ }
+ resCh <- result
+ return
+ }
+ }
+
+ // Wait between refreshes using exponential backoff, except when
+ // waiting for the target state to reoccur.
+ if targetOccurence == 0 {
+ wait *= 2
+ }
+
+ // If a poll interval has been specified, choose that interval.
+ // Otherwise bound the default value.
+ if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second {
+ wait = conf.PollInterval
+ } else {
+ if wait < conf.MinTimeout {
+ wait = conf.MinTimeout
+ } else if wait > 10*time.Second {
+ wait = 10 * time.Second
+ }
+ }
+
+ log.Printf("[TRACE] Waiting %s before next try", wait)
+ }
+ }()
+
+ // store the last value result from the refresh loop
+ lastResult := Result{}
+
+ timeout := time.After(conf.Timeout)
+ for {
+ select {
+ case r, ok := <-resCh:
+ // channel closed, so return the last result
+ if !ok {
+ return lastResult.Result, lastResult.Error
+ }
+
+ // we reached the intended state
+ if r.Done {
+ return r.Result, r.Error
+ }
+
+ // still waiting, store the last result
+ lastResult = r
+
+ case <-timeout:
+ log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout)
+ log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod)
+
+ // cancel the goroutine and start our grace period timer
+ close(cancelCh)
+ timeout := time.After(refreshGracePeriod)
+
+ // we need a for loop and a label to break on, because we may have
+ // an extra response value to read, but still want to wait for the
+ // channel to close.
+ forSelect:
+ for {
+ select {
+ case r, ok := <-resCh:
+ if r.Done {
+ // the last refresh loop reached the desired state
+ return r.Result, r.Error
+ }
+
+ if !ok {
+ // the goroutine returned
+ break forSelect
+ }
+
+ // target state not reached, save the result for the
+ // TimeoutError and wait for the channel to close
+ lastResult = r
+ case <-timeout:
+ log.Println("[ERROR] WaitForState exceeded refresh grace period")
+ break forSelect
+ }
+ }
+
+ return nil, &TimeoutError{
+ LastError: lastResult.Error,
+ LastState: lastResult.State,
+ Timeout: conf.Timeout,
+ ExpectedState: conf.Target,
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
new file mode 100644
index 00000000..04367c53
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
@@ -0,0 +1,790 @@
+package resource
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strings"
+ "testing"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/hashicorp/go-getter"
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/helper/logging"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+const TestEnvVar = "TF_ACC"
+
+// TestProvider can be implemented by any ResourceProvider to provide custom
+// reset functionality at the start of an acceptance test.
+// The helper/schema Provider implements this interface.
+type TestProvider interface {
+ TestReset() error
+}
+
+// TestCheckFunc is the callback type used with acceptance tests to check
+// the state of a resource. The state passed in is the latest state known,
+// or in the case of being after a destroy, it is the last known state when
+// it was created.
+type TestCheckFunc func(*terraform.State) error
+
+// ImportStateCheckFunc is the check function for ImportState tests
+type ImportStateCheckFunc func([]*terraform.InstanceState) error
+
+// TestCase is a single acceptance test case used to test the apply/destroy
+// lifecycle of a resource in a specific configuration.
+//
+// When the destroy plan is executed, the config from the last TestStep
+// is used to plan it.
+type TestCase struct {
+ // IsUnitTest allows a test to run regardless of the TF_ACC
+ // environment variable. This should be used with care - only for
+ // fast tests on local resources (e.g. remote state with a local
+ // backend) but can be used to increase confidence in correct
+ // operation of Terraform without waiting for a full acctest run.
+ IsUnitTest bool
+
+ // PreCheck, if non-nil, will be called before any test steps are
+ // executed. It will only be executed in the case that the steps
+ // would run, so it can be used for some validation before running
+ // acceptance tests, such as verifying that keys are set up.
+ PreCheck func()
+
+ // Providers is the ResourceProvider that will be under test.
+ //
+ // Alternately, ProviderFactories can be specified for the providers
+ // that are valid. This takes priority over Providers.
+ //
+ // The end effect of each is the same: specifying the providers that
+ // are used within the tests.
+ Providers map[string]terraform.ResourceProvider
+ ProviderFactories map[string]terraform.ResourceProviderFactory
+
+ // PreventPostDestroyRefresh can be set to true for cases where data sources
+ // are tested alongside real resources
+ PreventPostDestroyRefresh bool
+
+ // CheckDestroy is called after the resource is finally destroyed
+ // to allow the tester to test that the resource is truly gone.
+ CheckDestroy TestCheckFunc
+
+ // Steps are the apply sequences done within the context of the
+ // same state. Each step can have its own check to verify correctness.
+ Steps []TestStep
+
+ // The settings below control the "ID-only refresh test." This is
+ // an enabled-by-default test that verifies a resource can be
+ // refreshed with only its ID and still produce the same attributes.
+ // This validates the completeness of Refresh.
+ //
+ // IDRefreshName is the name of the resource to check. This will
+ // default to the first non-nil primary resource in the state.
+ //
+ // IDRefreshIgnore is a list of configuration keys that will be ignored.
+ IDRefreshName string
+ IDRefreshIgnore []string
+}
+
+// TestStep is a single apply sequence of a test, done within the
+// context of a state.
+//
+// Multiple TestSteps can be sequenced in a Test to allow testing
+// potentially complex update logic. In general, simply create/destroy
+// tests will only need one step.
+type TestStep struct {
+ // ResourceName should be set to the name of the resource
+ // that is being tested. Example: "aws_instance.foo". Various test
+ // modes use this to auto-detect state information.
+ //
+ // This is only required if the test mode settings below say it is
+ // for the mode you're using.
+ ResourceName string
+
+ // PreConfig is called before the Config is applied to perform any per-step
+ // setup that needs to happen. This is called regardless of "test mode"
+ // below.
+ PreConfig func()
+
+ //---------------------------------------------------------------
+ // Test modes. One of the following groups of settings must be
+ // set to determine what the test step will do. Ideally we would've
+ // used Go interfaces here, but there are now hundreds of tests we don't
+ // want to re-type, so instead we determine which step logic to run based
+ // on which of the settings below are set.
+ //---------------------------------------------------------------
+
+ //---------------------------------------------------------------
+ // Plan, Apply testing
+ //---------------------------------------------------------------
+
+ // Config is a string of the configuration to give to Terraform. If this
+ // is set, then the TestCase will execute this step with the same logic
+ // as a `terraform apply`.
+ Config string
+
+ // Check is called after the Config is applied. Use this step to
+ // make your own API calls to check the status of things, and to
+ // inspect the format of the ResourceState itself.
+ //
+ // If an error is returned, the test will fail. In this case, a
+ // destroy plan will still be attempted.
+ //
+ // If this is nil, no check is done on this step.
+ Check TestCheckFunc
+
+ // Destroy will create a destroy plan if set to true.
+ Destroy bool
+
+ // ExpectNonEmptyPlan can be set to true for specific types of tests that are
+ // looking to verify that a diff occurs
+ ExpectNonEmptyPlan bool
+
+ // ExpectError allows the construction of test cases that we expect to fail
+ // with an error. The specified regexp must match against the error for the
+ // test to pass.
+ ExpectError *regexp.Regexp
+
+ // PlanOnly can be set to only run `plan` with this configuration, and not
+ // actually apply it. This is useful for ensuring config changes result in
+ // no-op plans
+ PlanOnly bool
+
+ // PreventPostDestroyRefresh can be set to true for cases where data sources
+ // are tested alongside real resources
+ PreventPostDestroyRefresh bool
+
+ //---------------------------------------------------------------
+ // ImportState testing
+ //---------------------------------------------------------------
+
+ // ImportState, if true, will test the functionality of ImportState
+ // by importing the resource with ResourceName (must be set) and the
+ // ID of that resource.
+ ImportState bool
+
+ // ImportStateId is the ID to perform an ImportState operation with.
+ // This is optional. If it isn't set, then the resource ID is automatically
+ // determined by inspecting the state for ResourceName's ID.
+ ImportStateId string
+
+ // ImportStateIdPrefix is the prefix added in front of ImportStateId.
+ // This can be useful in complex import cases, where more than one
+ // attribute needs to be passed on as the Import ID. Mainly in cases
+ // where the ID is not known, and a known prefix needs to be added to
+ // the unset ImportStateId field.
+ ImportStateIdPrefix string
+
+ // ImportStateCheck checks the results of ImportState. It should be
+ // used to verify that the resulting value of ImportState has the
+ // proper resources, IDs, and attributes.
+ ImportStateCheck ImportStateCheckFunc
+
+ // ImportStateVerify, if true, will also check that the state values
+ // that are finally put into the state after import match for all the
+ // IDs returned by the Import.
+ //
+ // ImportStateVerifyIgnore are fields that should not be verified to
+ // be equal. These can be set to ephemeral fields or fields that can't
+ // be refreshed and don't matter.
+ ImportStateVerify bool
+ ImportStateVerifyIgnore []string
+}
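+
+// exampleAcceptanceTest is an editor's sketch, not part of the original
+// source, of a two-step test: apply a config, check an attribute, then
+// apply an updated config. The provider, resource address, and configs are
+// hypothetical.
+func exampleAcceptanceTest(t TestT, p terraform.ResourceProvider, cfg, cfgUpdated string) {
+	Test(t, TestCase{
+		Providers: map[string]terraform.ResourceProvider{"example": p},
+		Steps: []TestStep{
+			{
+				Config: cfg,
+				Check:  TestCheckResourceAttr("example_thing.foo", "name", "foo"),
+			},
+			{
+				Config: cfgUpdated,
+				Check:  TestCheckResourceAttr("example_thing.foo", "name", "bar"),
+			},
+		},
+	})
+}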
+
+// Test performs an acceptance test on a resource.
+//
+// Tests are not run unless the environment variable "TF_ACC" is
+// set to some non-empty value. This is to avoid test cases surprising
+// a user by creating real resources.
+//
+// Tests will fail unless the verbose flag (`go test -v`, or explicitly
+// the "-test.v" flag) is set. Because some acceptance tests can take a
+// long time, we require the verbose flag so users are able to see progress
+// output.
+func Test(t TestT, c TestCase) {
+ // We only run acceptance tests if an env var is set because they're
+ // slow and generally require some outside configuration. You can opt
+ // out of this check by setting IsUnitTest on individual TestCases.
+ if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest {
+ t.Skip(fmt.Sprintf(
+ "Acceptance tests skipped unless env '%s' set",
+ TestEnvVar))
+ return
+ }
+
+ logWriter, err := logging.LogOutput()
+ if err != nil {
+ t.Error(fmt.Errorf("error setting up logging: %s", err))
+ }
+ log.SetOutput(logWriter)
+
+ // We require verbose mode so that the user knows what is going on.
+ if !testTesting && !testing.Verbose() && !c.IsUnitTest {
+ t.Fatal("Acceptance tests must be run with the -v flag on tests")
+ return
+ }
+
+ // Run the PreCheck if we have it
+ if c.PreCheck != nil {
+ c.PreCheck()
+ }
+
+ ctxProviders, err := testProviderFactories(c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ opts := terraform.ContextOpts{Providers: ctxProviders}
+
+ // A single state variable to track the lifecycle, starting with no state
+ var state *terraform.State
+
+ // Go through each step and run it
+ var idRefreshCheck *terraform.ResourceState
+ idRefresh := c.IDRefreshName != ""
+ errored := false
+ for i, step := range c.Steps {
+ var err error
+ log.Printf("[WARN] Test: Executing step %d", i)
+
+ // Determine the test mode to execute
+ if step.Config != "" {
+ state, err = testStepConfig(opts, state, step)
+ } else if step.ImportState {
+ state, err = testStepImportState(opts, state, step)
+ } else {
+ err = fmt.Errorf(
+ "unknown test mode for step. Please see TestStep docs\n\n%#v",
+ step)
+ }
+
+ // If there was an error, exit
+ if err != nil {
+ // Perhaps we expected an error? Check if it matches
+ if step.ExpectError != nil {
+ if !step.ExpectError.MatchString(err.Error()) {
+ errored = true
+ t.Error(fmt.Sprintf(
+ "Step %d, expected error:\n\n%s\n\nTo match:\n\n%s\n\n",
+ i, err, step.ExpectError))
+ break
+ }
+ } else {
+ errored = true
+ t.Error(fmt.Sprintf(
+ "Step %d error: %s", i, err))
+ break
+ }
+ }
+
+ // If we've never checked an id-only refresh and our state isn't
+ // empty, find the first resource and test it.
+ if idRefresh && idRefreshCheck == nil && !state.Empty() {
+ // Find the first non-nil resource in the state
+ for _, m := range state.Modules {
+ if len(m.Resources) > 0 {
+ if v, ok := m.Resources[c.IDRefreshName]; ok {
+ idRefreshCheck = v
+ }
+
+ break
+ }
+ }
+
+ // If we have an instance to check for refreshes, do it
+ // immediately. We do it in the middle of another test
+ // because it shouldn't affect the overall state (refresh
+ // is read-only semantically) and we want to fail early if
+ // this fails. If refresh isn't read-only, then this will have
+ // caught a different bug.
+ if idRefreshCheck != nil {
+ log.Printf(
+ "[WARN] Test: Running ID-only refresh check on %s",
+ idRefreshCheck.Primary.ID)
+ if err := testIDOnlyRefresh(c, opts, step, idRefreshCheck); err != nil {
+ log.Printf("[ERROR] Test: ID-only test failed: %s", err)
+ t.Error(fmt.Sprintf(
+ "[ERROR] Test: ID-only test failed: %s", err))
+ break
+ }
+ }
+ }
+ }
+
+ // If we never checked an id-only refresh, it is a failure.
+ if idRefresh {
+ if !errored && len(c.Steps) > 0 && idRefreshCheck == nil {
+ t.Error("ID-only refresh check never ran.")
+ }
+ }
+
+ // If we have a state, then run the destroy
+ if state != nil {
+ lastStep := c.Steps[len(c.Steps)-1]
+ destroyStep := TestStep{
+ Config: lastStep.Config,
+ Check: c.CheckDestroy,
+ Destroy: true,
+ PreventPostDestroyRefresh: c.PreventPostDestroyRefresh,
+ }
+
+ log.Printf("[WARN] Test: Executing destroy step")
+ state, err := testStep(opts, state, destroyStep)
+ if err != nil {
+ t.Error(fmt.Sprintf(
+ "Error destroying resource! WARNING: Dangling resources\n"+
+ "may exist. The full state and error is shown below.\n\n"+
+ "Error: %s\n\nState: %s",
+ err,
+ state))
+ }
+ } else {
+ log.Printf("[WARN] Skipping destroy test since there is no state.")
+ }
+}
+
+// testProviderFactories is a helper to build the ResourceProviderFactory map
+// with pre-instantiated ResourceProviders, so that we can reset them for the
+// test, while only calling the factory function once.
+// Any errors are stored so that they can be returned by the factory in
+// terraform to match non-test behavior.
+func testProviderFactories(c TestCase) (map[string]terraform.ResourceProviderFactory, error) {
+ ctxProviders := c.ProviderFactories
+ if ctxProviders == nil {
+ ctxProviders = make(map[string]terraform.ResourceProviderFactory)
+ }
+ // add any fixed providers
+ for k, p := range c.Providers {
+ ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p)
+ }
+
+ // reset the providers if needed
+ for k, pf := range ctxProviders {
+ // instantiate each provider so it can be reset; any error is
+ // returned immediately so the test fails the same way terraform would
+ p, err := pf()
+ if err != nil {
+ return nil, err
+ }
+ if p, ok := p.(TestProvider); ok {
+ err := p.TestReset()
+ if err != nil {
+ return nil, fmt.Errorf("[ERROR] failed to reset provider %q: %s", k, err)
+ }
+ }
+ }
+
+ return ctxProviders, nil
+}
+
+// UnitTest is a helper to force the acceptance testing harness to run in the
+// normal unit test suite. This should only be used for resources that don't
+// have any external dependencies.
+func UnitTest(t TestT, c TestCase) {
+ c.IsUnitTest = true
+ Test(t, c)
+}
+
+func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r *terraform.ResourceState) error {
+ // TODO: We guard by this right now so master doesn't explode. We
+ // need to remove this eventually to make this part of the normal tests.
+ if os.Getenv("TF_ACC_IDONLY") == "" {
+ return nil
+ }
+
+ name := fmt.Sprintf("%s.foo", r.Type)
+
+ // Build the state. The state is just the resource with an ID. There
+ // are no attributes. We only set what is needed to perform a refresh.
+ state := terraform.NewState()
+ state.RootModule().Resources[name] = &terraform.ResourceState{
+ Type: r.Type,
+ Primary: &terraform.InstanceState{
+ ID: r.Primary.ID,
+ },
+ }
+
+ // Create the config module. We use the full config because Refresh
+ // doesn't have access to it and we may need things like provider
+ // configurations. The initial implementation of id-only checks used
+ // an empty config module, but that caused the aforementioned problems.
+ mod, err := testModule(opts, step)
+ if err != nil {
+ return err
+ }
+
+ // Initialize the context
+ opts.Module = mod
+ opts.State = state
+ ctx, err := terraform.NewContext(&opts)
+ if err != nil {
+ return err
+ }
+ if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 {
+ if len(es) > 0 {
+ estrs := make([]string, len(es))
+ for i, e := range es {
+ estrs[i] = e.Error()
+ }
+ return fmt.Errorf(
+ "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v",
+ ws, estrs)
+ }
+
+ log.Printf("[WARN] Config warnings: %#v", ws)
+ }
+
+ // Refresh!
+ state, err = ctx.Refresh()
+ if err != nil {
+ return fmt.Errorf("Error refreshing: %s", err)
+ }
+
+ // Verify attribute equivalence.
+ actualR := state.RootModule().Resources[name]
+ if actualR == nil {
+ return fmt.Errorf("Resource gone!")
+ }
+ if actualR.Primary == nil {
+ return fmt.Errorf("Resource has no primary instance")
+ }
+ actual := actualR.Primary.Attributes
+ expected := r.Primary.Attributes
+ // Remove fields we're ignoring
+ for _, v := range c.IDRefreshIgnore {
+ for k := range actual {
+ if strings.HasPrefix(k, v) {
+ delete(actual, k)
+ }
+ }
+ for k := range expected {
+ if strings.HasPrefix(k, v) {
+ delete(expected, k)
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ // Determine only the different attributes
+ for k, v := range expected {
+ if av, ok := actual[k]; ok && v == av {
+ delete(expected, k)
+ delete(actual, k)
+ }
+ }
+
+ spewConf := spew.NewDefaultConfig()
+ spewConf.SortKeys = true
+ return fmt.Errorf(
+ "Attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+
+ "\n\n%s\n\n%s",
+ spewConf.Sdump(actual), spewConf.Sdump(expected))
+ }
+
+ return nil
+}
+
+func testModule(
+ opts terraform.ContextOpts,
+ step TestStep) (*module.Tree, error) {
+ if step.PreConfig != nil {
+ step.PreConfig()
+ }
+
+ cfgPath, err := ioutil.TempDir("", "tf-test")
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error creating temporary directory for config: %s", err)
+ }
+ defer os.RemoveAll(cfgPath)
+
+ // Write the configuration
+ cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf"))
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error creating temporary file for config: %s", err)
+ }
+
+ _, err = io.Copy(cfgF, strings.NewReader(step.Config))
+ cfgF.Close()
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error creating temporary file for config: %s", err)
+ }
+
+ // Parse the configuration
+ mod, err := module.NewTreeModule("", cfgPath)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error loading configuration: %s", err)
+ }
+
+ // Load the modules
+ modStorage := &getter.FolderStorage{
+ StorageDir: filepath.Join(cfgPath, ".tfmodules"),
+ }
+ err = mod.Load(modStorage, module.GetModeGet)
+ if err != nil {
+ return nil, fmt.Errorf("Error downloading modules: %s", err)
+ }
+
+ return mod, nil
+}
+
+func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) {
+ if c.ResourceName == "" {
+ return nil, fmt.Errorf("ResourceName must be set in TestStep")
+ }
+
+ for _, m := range state.Modules {
+ if len(m.Resources) > 0 {
+ if v, ok := m.Resources[c.ResourceName]; ok {
+ return v, nil
+ }
+ }
+ }
+
+ return nil, fmt.Errorf(
+ "Resource specified by ResourceName couldn't be found: %s", c.ResourceName)
+}
+
+// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into
+// a single TestCheckFunc.
+//
+// As a user testing their provider, this lets you decompose your checks
+// into smaller pieces more easily.
+func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
+ return func(s *terraform.State) error {
+ for i, f := range fs {
+ if err := f(s); err != nil {
+ return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)
+ }
+ }
+
+ return nil
+ }
+}
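+
+// exampleComposedCheck is an editor's sketch, not part of the original
+// source: several per-attribute checks combined into one TestCheckFunc,
+// for a hypothetical resource address.
+func exampleComposedCheck() TestCheckFunc {
+	return ComposeTestCheckFunc(
+		TestCheckResourceAttrSet("example_thing.foo", "id"),
+		TestCheckResourceAttr("example_thing.foo", "name", "foo"),
+	)
+}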
+
+// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into
+// a single TestCheckFunc.
+//
+// As a user testing their provider, this lets you decompose your checks
+// into smaller pieces more easily.
+//
+// Unlike ComposeTestCheckFunc, ComposeAggregateTestCheckFunc runs _all_ of the
+// TestCheckFuncs and aggregates failures.
+func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
+ return func(s *terraform.State) error {
+ var result *multierror.Error
+
+ for i, f := range fs {
+ if err := f(s); err != nil {
+ result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err))
+ }
+ }
+
+ return result.ErrorOrNil()
+ }
+}
+
+// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value
+// exists in state for the given name/key combination. It is useful when
+// testing that computed values were set, when it is not possible to
+// know ahead of time what the values will be.
+func TestCheckResourceAttrSet(name, key string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ is, err := primaryInstanceState(s, name)
+ if err != nil {
+ return err
+ }
+
+ if val, ok := is.Attributes[key]; ok && val != "" {
+ return nil
+ }
+
+ return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key)
+ }
+}
+
+// TestCheckResourceAttr is a TestCheckFunc which validates
+// the value in state for the given name/key combination.
+func TestCheckResourceAttr(name, key, value string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ is, err := primaryInstanceState(s, name)
+ if err != nil {
+ return err
+ }
+
+ if v, ok := is.Attributes[key]; !ok || v != value {
+ if !ok {
+ return fmt.Errorf("%s: Attribute '%s' not found", name, key)
+ }
+
+ return fmt.Errorf(
+ "%s: Attribute '%s' expected %#v, got %#v",
+ name,
+ key,
+ value,
+ v)
+ }
+
+ return nil
+ }
+}
+
+// TestCheckNoResourceAttr is a TestCheckFunc which ensures that
+// NO value exists in state for the given name/key combination.
+func TestCheckNoResourceAttr(name, key string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ is, err := primaryInstanceState(s, name)
+ if err != nil {
+ return err
+ }
+
+ if _, ok := is.Attributes[key]; ok {
+ return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key)
+ }
+
+ return nil
+ }
+}
+
+// TestMatchResourceAttr is a TestCheckFunc which checks that the value
+// in state for the given name/key combination matches the given regex.
+func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc {
+ return func(s *terraform.State) error {
+ is, err := primaryInstanceState(s, name)
+ if err != nil {
+ return err
+ }
+
+ if !r.MatchString(is.Attributes[key]) {
+ return fmt.Errorf(
+ "%s: Attribute '%s' didn't match %q, got %#v",
+ name,
+ key,
+ r.String(),
+ is.Attributes[key])
+ }
+
+ return nil
+ }
+}
+
+// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the
+// value is a pointer so that it can be updated while the test is running.
+// It will only be dereferenced at the point this step is run.
+func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ return TestCheckResourceAttr(name, key, *value)(s)
+ }
+}
+
+// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values
+// in state for a pair of name/key combinations are equal.
+func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ isFirst, err := primaryInstanceState(s, nameFirst)
+ if err != nil {
+ return err
+ }
+ vFirst, ok := isFirst.Attributes[keyFirst]
+ if !ok {
+ return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst)
+ }
+
+ isSecond, err := primaryInstanceState(s, nameSecond)
+ if err != nil {
+ return err
+ }
+ vSecond, ok := isSecond.Attributes[keySecond]
+ if !ok {
+ return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond)
+ }
+
+ if vFirst != vSecond {
+ return fmt.Errorf(
+ "%s: Attribute '%s' expected %#v, got %#v",
+ nameFirst,
+ keyFirst,
+ vSecond,
+ vFirst)
+ }
+
+ return nil
+ }
+}
+
+// TestCheckOutput checks an output in the Terraform configuration
+func TestCheckOutput(name, value string) TestCheckFunc {
+ return func(s *terraform.State) error {
+ ms := s.RootModule()
+ rs, ok := ms.Outputs[name]
+ if !ok {
+ return fmt.Errorf("Not found: %s", name)
+ }
+
+ if rs.Value != value {
+ return fmt.Errorf(
+ "Output '%s': expected %#v, got %#v",
+ name,
+ value,
+ rs)
+ }
+
+ return nil
+ }
+}
+
+// TestMatchOutput is a TestCheckFunc which checks that the value of an
+// output in the Terraform state matches the given regexp.
+func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc {
+ return func(s *terraform.State) error {
+ ms := s.RootModule()
+ rs, ok := ms.Outputs[name]
+ if !ok {
+ return fmt.Errorf("Not found: %s", name)
+ }
+
+ if !r.MatchString(rs.Value.(string)) {
+ return fmt.Errorf(
+ "Output '%s': %#v didn't match %q",
+ name,
+ rs,
+ r.String())
+ }
+
+ return nil
+ }
+}
+
+// TestT is the interface used to handle the lifecycle of a test.
+//
+// Users should just use a *testing.T object, which implements this.
+type TestT interface {
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Skip(args ...interface{})
+}
+
+// This is set to true by unit tests to alter some behavior
+var testTesting = false
+
+// primaryInstanceState returns the primary instance state for the given resource name.
+func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) {
+ ms := s.RootModule()
+ rs, ok := ms.Resources[name]
+ if !ok {
+ return nil, fmt.Errorf("Not found: %s", name)
+ }
+
+ is := rs.Primary
+ if is == nil {
+ return nil, fmt.Errorf("No primary instance: %s", name)
+ }
+
+ return is, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
new file mode 100644
index 00000000..537a11c3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
@@ -0,0 +1,160 @@
+package resource
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// testStepConfig runs a config-mode test step
+func testStepConfig(
+ opts terraform.ContextOpts,
+ state *terraform.State,
+ step TestStep) (*terraform.State, error) {
+ return testStep(opts, state, step)
+}
+
+func testStep(
+ opts terraform.ContextOpts,
+ state *terraform.State,
+ step TestStep) (*terraform.State, error) {
+ mod, err := testModule(opts, step)
+ if err != nil {
+ return state, err
+ }
+
+ // Build the context
+ opts.Module = mod
+ opts.State = state
+ opts.Destroy = step.Destroy
+ ctx, err := terraform.NewContext(&opts)
+ if err != nil {
+ return state, fmt.Errorf("Error initializing context: %s", err)
+ }
+ if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 {
+ if len(es) > 0 {
+ estrs := make([]string, len(es))
+ for i, e := range es {
+ estrs[i] = e.Error()
+ }
+ return state, fmt.Errorf(
+ "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v",
+ ws, estrs)
+ }
+ log.Printf("[WARN] Config warnings: %#v", ws)
+ }
+
+ // Refresh!
+ state, err = ctx.Refresh()
+ if err != nil {
+ return state, fmt.Errorf(
+ "Error refreshing: %s", err)
+ }
+
+ // If this step is a PlanOnly step, skip over this first Plan and subsequent
+ // Apply, and use the follow-up Plan that checks for perpetual diffs.
+ if !step.PlanOnly {
+ // Plan!
+ if p, err := ctx.Plan(); err != nil {
+ return state, fmt.Errorf(
+ "Error planning: %s", err)
+ } else {
+ log.Printf("[WARN] Test: Step plan: %s", p)
+ }
+
+ // We need to keep a copy of the state prior to destroying
+ // such that destroy steps can verify their behaviour in the check
+ // function
+ stateBeforeApplication := state.DeepCopy()
+
+ // Apply!
+ state, err = ctx.Apply()
+ if err != nil {
+ return state, fmt.Errorf("Error applying: %s", err)
+ }
+
+ // Check! Excitement!
+ if step.Check != nil {
+ if step.Destroy {
+ if err := step.Check(stateBeforeApplication); err != nil {
+ return state, fmt.Errorf("Check failed: %s", err)
+ }
+ } else {
+ if err := step.Check(state); err != nil {
+ return state, fmt.Errorf("Check failed: %s", err)
+ }
+ }
+ }
+ }
+
+ // Now verify that the plan is empty and we don't have a perpetual diff
+ // issue. We do this with TWO plans. One without a refresh.
+ var p *terraform.Plan
+ if p, err = ctx.Plan(); err != nil {
+ return state, fmt.Errorf("Error on follow-up plan: %s", err)
+ }
+ if p.Diff != nil && !p.Diff.Empty() {
+ if step.ExpectNonEmptyPlan {
+ log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p)
+ } else {
+ return state, fmt.Errorf(
+ "After applying this step, the plan was not empty:\n\n%s", p)
+ }
+ }
+
+ // And another after a Refresh.
+ if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) {
+ state, err = ctx.Refresh()
+ if err != nil {
+ return state, fmt.Errorf(
+ "Error on follow-up refresh: %s", err)
+ }
+ }
+ if p, err = ctx.Plan(); err != nil {
+ return state, fmt.Errorf("Error on second follow-up plan: %s", err)
+ }
+ empty := p.Diff == nil || p.Diff.Empty()
+
+ // Data resources are tricky because they legitimately get instantiated
+ // during refresh so that they will be already populated during the
+ // plan walk. Because of this, if we have any data resources in the
+ // config we'll end up wanting to destroy them again here. This is
+ // acceptable and expected, and we'll treat it as "empty" for the
+ // sake of this testing.
+ if step.Destroy {
+ empty = true
+
+ for _, moduleDiff := range p.Diff.Modules {
+ for k, instanceDiff := range moduleDiff.Resources {
+ if !strings.HasPrefix(k, "data.") {
+ empty = false
+ break
+ }
+
+ if !instanceDiff.Destroy {
+ empty = false
+ }
+ }
+ }
+ }
+
+ if !empty {
+ if step.ExpectNonEmptyPlan {
+ log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p)
+ } else {
+ return state, fmt.Errorf(
+ "After applying this step and refreshing, "+
+ "the plan was not empty:\n\n%s", p)
+ }
+ }
+
+ // Made it here, but expected a non-empty plan, fail!
+ if step.ExpectNonEmptyPlan && (p.Diff == nil || p.Diff.Empty()) {
+ return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!")
+ }
+
+ // Made it here? Good job test step!
+ return state, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
new file mode 100644
index 00000000..28ad1052
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
@@ -0,0 +1,141 @@
+package resource
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "strings"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// testStepImportState runs an import state test step
+func testStepImportState(
+ opts terraform.ContextOpts,
+ state *terraform.State,
+ step TestStep) (*terraform.State, error) {
+ // Determine the ID to import
+ importId := step.ImportStateId
+ if importId == "" {
+ resource, err := testResource(step, state)
+ if err != nil {
+ return state, err
+ }
+
+ importId = resource.Primary.ID
+ }
+ importPrefix := step.ImportStateIdPrefix
+ if importPrefix != "" {
+ importId = fmt.Sprintf("%s%s", importPrefix, importId)
+ }
+
+ // Setup the context. We initialize with an empty state. We use the
+ // full config for provider configurations.
+ mod, err := testModule(opts, step)
+ if err != nil {
+ return state, err
+ }
+
+ opts.Module = mod
+ opts.State = terraform.NewState()
+ ctx, err := terraform.NewContext(&opts)
+ if err != nil {
+ return state, err
+ }
+
+ // Do the import!
+ newState, err := ctx.Import(&terraform.ImportOpts{
+ // Set the module so that any provider config is loaded
+ Module: mod,
+
+ Targets: []*terraform.ImportTarget{
+ &terraform.ImportTarget{
+ Addr: step.ResourceName,
+ ID: importId,
+ },
+ },
+ })
+ if err != nil {
+ log.Printf("[ERROR] Test: ImportState failure: %s", err)
+ return state, err
+ }
+
+ // Go through the new state and verify
+ if step.ImportStateCheck != nil {
+ var states []*terraform.InstanceState
+ for _, r := range newState.RootModule().Resources {
+ if r.Primary != nil {
+ states = append(states, r.Primary)
+ }
+ }
+ if err := step.ImportStateCheck(states); err != nil {
+ return state, err
+ }
+ }
+
+ // Verify that all the states match
+ if step.ImportStateVerify {
+ new := newState.RootModule().Resources
+ old := state.RootModule().Resources
+ for _, r := range new {
+ // Find the existing resource
+ var oldR *terraform.ResourceState
+ for _, r2 := range old {
+ if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type {
+ oldR = r2
+ break
+ }
+ }
+ if oldR == nil {
+ return state, fmt.Errorf(
+ "Failed state verification, resource with ID %s not found",
+ r.Primary.ID)
+ }
+
+ // Compare their attributes
+ actual := make(map[string]string)
+ for k, v := range r.Primary.Attributes {
+ actual[k] = v
+ }
+ expected := make(map[string]string)
+ for k, v := range oldR.Primary.Attributes {
+ expected[k] = v
+ }
+
+ // Remove fields we're ignoring
+ for _, v := range step.ImportStateVerifyIgnore {
+ for k := range actual {
+ if strings.HasPrefix(k, v) {
+ delete(actual, k)
+ }
+ }
+ for k := range expected {
+ if strings.HasPrefix(k, v) {
+ delete(expected, k)
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ // Determine only the different attributes
+ for k, v := range expected {
+ if av, ok := actual[k]; ok && v == av {
+ delete(expected, k)
+ delete(actual, k)
+ }
+ }
+
+ spewConf := spew.NewDefaultConfig()
+ spewConf.SortKeys = true
+ return state, fmt.Errorf(
+ "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+
+ "\n\n%s\n\n%s",
+ spewConf.Sdump(actual), spewConf.Sdump(expected))
+ }
+ }
+ }
+
+ // Return the old state (non-imported) so we don't change anything.
+ return state, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go
new file mode 100644
index 00000000..ca50e292
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go
@@ -0,0 +1,84 @@
+package resource
+
+import (
+ "sync"
+ "time"
+)
+
+// Retry is a basic wrapper around StateChangeConf that will just retry
+// a function until it no longer returns an error.
+func Retry(timeout time.Duration, f RetryFunc) error {
+ // These are used to pull the error out of the function; need a mutex to
+ // avoid a data race.
+ var resultErr error
+ var resultErrMu sync.Mutex
+
+ c := &StateChangeConf{
+ Pending: []string{"retryableerror"},
+ Target: []string{"success"},
+ Timeout: timeout,
+ MinTimeout: 500 * time.Millisecond,
+ Refresh: func() (interface{}, string, error) {
+ rerr := f()
+
+ resultErrMu.Lock()
+ defer resultErrMu.Unlock()
+
+ if rerr == nil {
+ resultErr = nil
+ return 42, "success", nil
+ }
+
+ resultErr = rerr.Err
+
+ if rerr.Retryable {
+ return 42, "retryableerror", nil
+ }
+ return nil, "quit", rerr.Err
+ },
+ }
+
+ _, waitErr := c.WaitForState()
+
+ // Acquire the lock here to avoid a data race on resultErr as the
+ // return value
+ resultErrMu.Lock()
+ defer resultErrMu.Unlock()
+
+ // resultErr may be nil because the wait timed out and resultErr was never
+ // set; this is still an error
+ if resultErr == nil {
+ return waitErr
+ }
+ // resultErr takes precedence over waitErr if both are set because it is
+ // more likely to be useful
+ return resultErr
+}
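+
+// exampleRetry is an editor's sketch, not part of the original source: a
+// hypothetical create call is retried on errors the caller classifies as
+// transient, until it succeeds or the timeout elapses.
+func exampleRetry(create func() error, transient func(error) bool) error {
+	return Retry(2*time.Minute, func() *RetryError {
+		err := create()
+		if err == nil {
+			return nil
+		}
+		if transient(err) {
+			return RetryableError(err)
+		}
+		return NonRetryableError(err)
+	})
+}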
+
+// RetryFunc is the function retried until it succeeds.
+type RetryFunc func() *RetryError
+
+// RetryError is the required return type of RetryFunc. It forces client code
+// to choose whether or not a given error is retryable.
+type RetryError struct {
+ Err error
+ Retryable bool
+}
+
+// RetryableError is a helper to create a RetryError that's retryable from a
+// given error.
+func RetryableError(err error) *RetryError {
+ if err == nil {
+ return nil
+ }
+ return &RetryError{Err: err, Retryable: true}
+}
+
+// NonRetryableError is a helper to create a RetryError that's _not_ retryable
+// from a given error.
+func NonRetryableError(err error) *RetryError {
+ if err == nil {
+ return nil
+ }
+ return &RetryError{Err: err, Retryable: false}
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/README.md b/vendor/github.com/hashicorp/terraform/helper/schema/README.md
new file mode 100644
index 00000000..28c83628
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/README.md
@@ -0,0 +1,11 @@
+# Terraform Helper Lib: schema
+
+The `schema` package provides a high-level interface for writing resource
+providers for Terraform.
+
+If you're writing a resource provider, we recommend you use this package.
+
+The interface exposed by this package is much friendlier than trying to
+write to the Terraform API directly. The core Terraform API is low-level
+and built for maximum flexibility and control, whereas this library is a
+framework around it that makes common providers easier to write.
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
new file mode 100644
index 00000000..a0729c02
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
@@ -0,0 +1,94 @@
+package schema
+
+import (
+ "context"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Backend represents a partial backend.Backend implementation and simplifies
+// the creation of configuration loading and validation.
+//
+// Unlike other schema structs such as Provider, this struct is meant to be
+// embedded within your actual implementation. It provides implementations
+// only for Input and Configure and gives you a method for accessing the
+// configuration in the form of a ResourceData that you're expected to call
+// from the other implementation funcs.
+type Backend struct {
+ // Schema is the schema for the configuration of this backend. If this
+ // Backend has no configuration this can be omitted.
+ Schema map[string]*Schema
+
+ // ConfigureFunc is called to configure the backend. Use the
+ // FromContext* methods to extract information from the context.
+ // This can be nil, in which case nothing will be called but the
+ // config will still be stored.
+ ConfigureFunc func(context.Context) error
+
+ config *ResourceData
+}
+
+var (
+ backendConfigKey = contextKey("backend config")
+)
+
+// FromContextBackendConfig extracts a ResourceData with the configuration
+// from the context. This should only be called by Backend functions.
+func FromContextBackendConfig(ctx context.Context) *ResourceData {
+ return ctx.Value(backendConfigKey).(*ResourceData)
+}
+
+func (b *Backend) Input(
+ input terraform.UIInput,
+ c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+ if b == nil {
+ return c, nil
+ }
+
+ return schemaMap(b.Schema).Input(input, c)
+}
+
+func (b *Backend) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ if b == nil {
+ return nil, nil
+ }
+
+ return schemaMap(b.Schema).Validate(c)
+}
+
+func (b *Backend) Configure(c *terraform.ResourceConfig) error {
+ if b == nil {
+ return nil
+ }
+
+ sm := schemaMap(b.Schema)
+
+ // Get a ResourceData for this configuration. To do this, we actually
+ // generate an intermediary "diff" although that is never exposed.
+ diff, err := sm.Diff(nil, c)
+ if err != nil {
+ return err
+ }
+
+ data, err := sm.Data(nil, diff)
+ if err != nil {
+ return err
+ }
+ b.config = data
+
+ if b.ConfigureFunc != nil {
+ err = b.ConfigureFunc(context.WithValue(
+ context.Background(), backendConfigKey, data))
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Config returns the configuration. This is available after Configure is
+// called.
+func (b *Backend) Config() *ResourceData {
+ return b.config
+}
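
A minimal sketch of the intended embedding, assuming a hypothetical backend with a single "path" attribute; only the names local to this sketch are invented.

    package mybackend

    import (
        "context"

        "github.com/hashicorp/terraform/helper/schema"
    )

    // Local embeds schema.Backend and so inherits Input, Validate and Configure.
    type Local struct {
        *schema.Backend
    }

    func New() *Local {
        b := &Local{}
        b.Backend = &schema.Backend{
            Schema: map[string]*schema.Schema{
                "path": {Type: schema.TypeString, Optional: true},
            },
            ConfigureFunc: func(ctx context.Context) error {
                // The validated configuration travels in the context.
                data := schema.FromContextBackendConfig(ctx)
                _ = data.Get("path") // use the configured value here
                return nil
            },
        }
        return b
    }
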
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go
new file mode 100644
index 00000000..5a03d2d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go
@@ -0,0 +1,59 @@
+package schema
+
+import (
+ "fmt"
+)
+
+// DataSourceResourceShim takes a Resource instance describing a data source
+// (with a Read implementation and a Schema, at least) and returns a new
+// Resource instance with additional Create and Delete implementations that
+// allow the data source to be used as a resource.
+//
+// This is a backward-compatibility layer for data sources that were formerly
+// read-only resources before the data source concept was added. It should not
+// be used for any *new* data sources.
+//
+// The Read function for the data source *must* call d.SetId with a non-empty
+// id in order for this shim to function as expected.
+//
+// The provided Resource instance, and its schema, will be modified in-place
+// to make it suitable for use as a full resource.
+func DataSourceResourceShim(name string, dataSource *Resource) *Resource {
+ // Recursively, in-place adjust the schema so that it has ForceNew
+ // on any user-settable resource.
+ dataSourceResourceShimAdjustSchema(dataSource.Schema)
+
+ dataSource.Create = CreateFunc(dataSource.Read)
+ dataSource.Delete = func(d *ResourceData, meta interface{}) error {
+ d.SetId("")
+ return nil
+ }
+ dataSource.Update = nil // should already be nil, but let's make sure
+
+ // FIXME: Link to some further docs either on the website or in the
+ // changelog, once such a thing exists.
+ dataSource.deprecationMessage = fmt.Sprintf(
+ "using %s as a resource is deprecated; consider using the data source instead",
+ name,
+ )
+
+ return dataSource
+}
+
+func dataSourceResourceShimAdjustSchema(schema map[string]*Schema) {
+ for _, s := range schema {
+ // If the attribute is configurable then it must be ForceNew,
+ // since we have no Update implementation.
+ if s.Required || s.Optional {
+ s.ForceNew = true
+ }
+
+ // If the attribute is a nested resource, we need to recursively
+ // apply these same adjustments to it.
+ if s.Elem != nil {
+ if r, ok := s.Elem.(*Resource); ok {
+ dataSourceResourceShimAdjustSchema(r.Schema)
+ }
+ }
+ }
+}
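
A sketch of how a provider might wire the shim, assuming a hypothetical "example_image" data source; the Read implementation is reduced to the one requirement noted above, a non-empty id.

    package example

    import "github.com/hashicorp/terraform/helper/schema"

    func dataSourceImage() *schema.Resource {
        return &schema.Resource{
            Read: func(d *schema.ResourceData, meta interface{}) error {
                d.SetId("img-1") // the shim requires Read to set a non-empty id
                return nil
            },
        }
    }

    func Provider() *schema.Provider {
        return &schema.Provider{
            DataSourcesMap: map[string]*schema.Resource{
                "example_image": dataSourceImage(),
            },
            ResourcesMap: map[string]*schema.Resource{
                // Deprecated resource form of the same data source.
                "example_image": schema.DataSourceResourceShim(
                    "example_image", dataSourceImage()),
            },
        }
    }
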
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/equal.go b/vendor/github.com/hashicorp/terraform/helper/schema/equal.go
new file mode 100644
index 00000000..d5e20e03
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/equal.go
@@ -0,0 +1,6 @@
+package schema
+
+// Equal is an interface that checks for deep equality between two objects.
+type Equal interface {
+ Equal(interface{}) bool
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
new file mode 100644
index 00000000..1660a670
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
@@ -0,0 +1,334 @@
+package schema
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// FieldReaders are responsible for decoding fields out of data into
+// the proper typed representation. ResourceData uses this to query data
+// out of multiple sources: config, state, diffs, etc.
+type FieldReader interface {
+ ReadField([]string) (FieldReadResult, error)
+}
+
+// FieldReadResult encapsulates all the resulting data from reading
+// a field.
+type FieldReadResult struct {
+ // Value is the actual read value. ValueProcessed is an alternate,
+ // processed form of the value that some readers (such as
+ // DiffFieldReader) record alongside Value.
+ Value interface{}
+ ValueProcessed interface{}
+
+ // Exists is true if the field was found in the data. False means
+ // it wasn't found (assuming no error occurred).
+ Exists bool
+
+ // Computed is true if the field was found but the value
+ // is computed.
+ Computed bool
+}
+
+// ValueOrZero returns the value of this result or the zero value of the
+// schema type, ensuring a consistent non-nil return value.
+func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} {
+ if r.Value != nil {
+ return r.Value
+ }
+
+ return s.ZeroValue()
+}
+
+// addrToSchema finds the final element schema for the given address
+// and the given schema. It returns all the schemas that led to the final
+// schema. These are in order of the address (out to in).
+func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema {
+ current := &Schema{
+ Type: typeObject,
+ Elem: schemaMap,
+ }
+
+ // If we aren't given an address, then the user is requesting the
+ // full object, so we return the special value which is the full object.
+ if len(addr) == 0 {
+ return []*Schema{current}
+ }
+
+ result := make([]*Schema, 0, len(addr))
+ for len(addr) > 0 {
+ k := addr[0]
+ addr = addr[1:]
+
+ REPEAT:
+ // We want to trim off the first "typeObject" since it's not a
+ // real lookup that people do. i.e. []string{"foo"} in a structure
+ // isn't {typeObject, typeString}, it's just {typeString}.
+ if len(result) > 0 || current.Type != typeObject {
+ result = append(result, current)
+ }
+
+ switch t := current.Type; t {
+ case TypeBool, TypeInt, TypeFloat, TypeString:
+ if len(addr) > 0 {
+ return nil
+ }
+ case TypeList, TypeSet:
+ isIndex := len(addr) > 0 && addr[0] == "#"
+
+ switch v := current.Elem.(type) {
+ case *Resource:
+ current = &Schema{
+ Type: typeObject,
+ Elem: v.Schema,
+ }
+ case *Schema:
+ current = v
+ case ValueType:
+ current = &Schema{Type: v}
+ default:
+ // we may not know the Elem type and are just looking for the
+ // index
+ if isIndex {
+ break
+ }
+
+ if len(addr) == 0 {
+ // we've processed the address, so return what we've
+ // collected
+ return result
+ }
+
+ if len(addr) == 1 {
+ if _, err := strconv.Atoi(addr[0]); err == nil {
+ // we're indexing a value without a schema. This can
+ // happen if the list is nested in another schema type.
+ // Default to a TypeString like we do with a map
+ current = &Schema{Type: TypeString}
+ break
+ }
+ }
+
+ return nil
+ }
+
+ // If we only have one more thing and the next thing
+ // is a #, then we're accessing the index which is always
+ // an int.
+ if isIndex {
+ current = &Schema{Type: TypeInt}
+ break
+ }
+
+ case TypeMap:
+ if len(addr) > 0 {
+ switch v := current.Elem.(type) {
+ case ValueType:
+ current = &Schema{Type: v}
+ default:
+ // maps default to string values. This is all we can have
+ // if this is nested in another list or map.
+ current = &Schema{Type: TypeString}
+ }
+ }
+ case typeObject:
+ // If we're already in the object, then we want to handle Sets
+ // and Lists specially. Basically, their next key is the lookup
+ // key (the set value or the list element). For these scenarios,
+ // we just want to skip it and move to the next element if there
+ // is one.
+ if len(result) > 0 {
+ lastType := result[len(result)-2].Type
+ if lastType == TypeSet || lastType == TypeList {
+ if len(addr) == 0 {
+ break
+ }
+
+ k = addr[0]
+ addr = addr[1:]
+ }
+ }
+
+ m := current.Elem.(map[string]*Schema)
+ val, ok := m[k]
+ if !ok {
+ return nil
+ }
+
+ current = val
+ goto REPEAT
+ }
+ }
+
+ return result
+}
+
+// readListField is a generic method for reading a list field out of a
+// FieldReader. It does this based on the assumption that there is a key
+// "foo.#" for a list "foo" and that the indexes are "foo.0", "foo.1", etc.
+// after that point.
+func readListField(
+ r FieldReader, addr []string, schema *Schema) (FieldReadResult, error) {
+ addrPadded := make([]string, len(addr)+1)
+ copy(addrPadded, addr)
+ addrPadded[len(addrPadded)-1] = "#"
+
+ // Get the number of elements in the list
+ countResult, err := r.ReadField(addrPadded)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if !countResult.Exists {
+ // No count, means we have no list
+ countResult.Value = 0
+ }
+
+ // If we have an empty list, then return an empty list
+ if countResult.Computed || countResult.Value.(int) == 0 {
+ return FieldReadResult{
+ Value: []interface{}{},
+ Exists: countResult.Exists,
+ Computed: countResult.Computed,
+ }, nil
+ }
+
+ // Go through each count, and get the item value out of it
+ result := make([]interface{}, countResult.Value.(int))
+ for i := range result {
+ is := strconv.FormatInt(int64(i), 10)
+ addrPadded[len(addrPadded)-1] = is
+ rawResult, err := r.ReadField(addrPadded)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if !rawResult.Exists {
+ // This should never happen, because by the time the data
+ // gets to the FieldReaders, all the defaults should be set by
+ // Schema.
+ rawResult.Value = nil
+ }
+
+ result[i] = rawResult.Value
+ }
+
+ return FieldReadResult{
+ Value: result,
+ Exists: true,
+ }, nil
+}
+
+// readObjectField is a generic method for reading objects out of FieldReaders
+// based on the assumption that building an address of []string{k, FIELD}
+// will result in the proper field data.
+func readObjectField(
+ r FieldReader,
+ addr []string,
+ schema map[string]*Schema) (FieldReadResult, error) {
+ result := make(map[string]interface{})
+ exists := false
+ for field, s := range schema {
+ addrRead := make([]string, len(addr), len(addr)+1)
+ copy(addrRead, addr)
+ addrRead = append(addrRead, field)
+ rawResult, err := r.ReadField(addrRead)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if rawResult.Exists {
+ exists = true
+ }
+
+ result[field] = rawResult.ValueOrZero(s)
+ }
+
+ return FieldReadResult{
+ Value: result,
+ Exists: exists,
+ }, nil
+}
+
+// convert map values to the proper primitive type based on schema.Elem
+func mapValuesToPrimitive(m map[string]interface{}, schema *Schema) error {
+
+ elemType := TypeString
+ if et, ok := schema.Elem.(ValueType); ok {
+ elemType = et
+ }
+
+ switch elemType {
+ case TypeInt, TypeFloat, TypeBool:
+ for k, v := range m {
+ vs, ok := v.(string)
+ if !ok {
+ continue
+ }
+
+ v, err := stringToPrimitive(vs, false, &Schema{Type: elemType})
+ if err != nil {
+ return err
+ }
+
+ m[k] = v
+ }
+ }
+ return nil
+}
+
+func stringToPrimitive(
+ value string, computed bool, schema *Schema) (interface{}, error) {
+ var returnVal interface{}
+ switch schema.Type {
+ case TypeBool:
+ if value == "" {
+ returnVal = false
+ break
+ }
+ if computed {
+ break
+ }
+
+ v, err := strconv.ParseBool(value)
+ if err != nil {
+ return nil, err
+ }
+
+ returnVal = v
+ case TypeFloat:
+ if value == "" {
+ returnVal = 0.0
+ break
+ }
+ if computed {
+ break
+ }
+
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ returnVal = v
+ case TypeInt:
+ if value == "" {
+ returnVal = 0
+ break
+ }
+ if computed {
+ break
+ }
+
+ v, err := strconv.ParseInt(value, 0, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ returnVal = int(v)
+ case TypeString:
+ returnVal = value
+ default:
+ panic(fmt.Sprintf("Unknown type: %s", schema.Type))
+ }
+
+ return returnVal, nil
+}
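
The flat-map address convention these readers share ("foo.#" for a list's count, "foo.N" for its elements) can be seen with the MapFieldReader defined later in this diff; a minimal sketch, with an invented "ports" schema:

    package main

    import (
        "fmt"

        "github.com/hashicorp/terraform/helper/schema"
    )

    func main() {
        r := &schema.MapFieldReader{
            Schema: map[string]*schema.Schema{
                "ports": {
                    Type: schema.TypeList,
                    Elem: &schema.Schema{Type: schema.TypeInt},
                },
            },
            // The flat encoding: "ports.#" holds the count, "ports.N" the items.
            Map: schema.BasicMapReader(map[string]string{
                "ports.#": "2",
                "ports.0": "80",
                "ports.1": "443",
            }),
        }

        res, err := r.ReadField([]string{"ports"})
        fmt.Println(res.Value, res.Exists, err) // [80 443] true <nil>
    }
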
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
new file mode 100644
index 00000000..f958bbcb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
@@ -0,0 +1,333 @@
+package schema
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/mitchellh/mapstructure"
+)
+
+// ConfigFieldReader reads fields out of an untyped map[string]string to the
+// best of its ability. It also applies defaults from the Schema. (The other
+// field readers do not need default handling because they source fully
+// populated data structures.)
+type ConfigFieldReader struct {
+ Config *terraform.ResourceConfig
+ Schema map[string]*Schema
+
+ indexMaps map[string]map[string]int
+ once sync.Once
+}
+
+func (r *ConfigFieldReader) ReadField(address []string) (FieldReadResult, error) {
+ r.once.Do(func() { r.indexMaps = make(map[string]map[string]int) })
+ return r.readField(address, false)
+}
+
+func (r *ConfigFieldReader) readField(
+ address []string, nested bool) (FieldReadResult, error) {
+ schemaList := addrToSchema(address, r.Schema)
+ if len(schemaList) == 0 {
+ return FieldReadResult{}, nil
+ }
+
+ if !nested {
+ // If we have a set anywhere in the address, then we need to
+ // read that set out in order and actually replace that part of
+ // the address with the real list index. i.e. set.50 might actually
+ // map to set.12 in the config, since it is in list order in the
+ // config, not indexed by set value.
+ for i, v := range schemaList {
+ // Sets are the only thing that cause this issue.
+ if v.Type != TypeSet {
+ continue
+ }
+
+ // If we're at the end of the list, then we don't have to worry
+ // about this because we're just requesting the whole set.
+ if i == len(schemaList)-1 {
+ continue
+ }
+
+ // If we're looking for the count, then ignore...
+ if address[i+1] == "#" {
+ continue
+ }
+
+ indexMap, ok := r.indexMaps[strings.Join(address[:i+1], ".")]
+ if !ok {
+ // Get the set so we can get the index map that tells us the
+ // mapping of the hash code to the list index
+ _, err := r.readSet(address[:i+1], v)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ indexMap = r.indexMaps[strings.Join(address[:i+1], ".")]
+ }
+
+ index, ok := indexMap[address[i+1]]
+ if !ok {
+ return FieldReadResult{}, nil
+ }
+
+ address[i+1] = strconv.FormatInt(int64(index), 10)
+ }
+ }
+
+ k := strings.Join(address, ".")
+ schema := schemaList[len(schemaList)-1]
+
+ // If we're getting the single element of a promoted list, then
+ // check to see if we have a single element we need to promote.
+ if address[len(address)-1] == "0" && len(schemaList) > 1 {
+ lastSchema := schemaList[len(schemaList)-2]
+ if lastSchema.Type == TypeList && lastSchema.PromoteSingle {
+ k := strings.Join(address[:len(address)-1], ".")
+ result, err := r.readPrimitive(k, schema)
+ if err == nil {
+ return result, nil
+ }
+ }
+ }
+
+ switch schema.Type {
+ case TypeBool, TypeFloat, TypeInt, TypeString:
+ return r.readPrimitive(k, schema)
+ case TypeList:
+ // If we support promotion then we first check if we have a lone
+ // value that we must promote.
+ if schema.PromoteSingle {
+ result, err := r.readPrimitive(k, schema.Elem.(*Schema))
+ if err == nil && result.Exists {
+ result.Value = []interface{}{result.Value}
+ return result, nil
+ }
+ }
+
+ return readListField(&nestedConfigFieldReader{r}, address, schema)
+ case TypeMap:
+ return r.readMap(k, schema)
+ case TypeSet:
+ return r.readSet(address, schema)
+ case typeObject:
+ return readObjectField(
+ &nestedConfigFieldReader{r},
+ address, schema.Elem.(map[string]*Schema))
+ default:
+ panic(fmt.Sprintf("Unknown type: %s", schema.Type))
+ }
+}
+
+func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
+ // We want both the raw value and the interpolated. We use the interpolated
+ // to store actual values and we use the raw one to check for
+ // computed keys. Actual values are obtained in the switch, depending on
+ // the type of the raw value.
+ mraw, ok := r.Config.GetRaw(k)
+ if !ok {
+ // check if this is from an interpolated field by seeing if it exists
+ // in the config
+ _, ok := r.Config.Get(k)
+ if !ok {
+ // this really doesn't exist
+ return FieldReadResult{}, nil
+ }
+
+ // We couldn't fetch the value from a nested data structure, so treat the
+ // raw value as an interpolation string. The mraw value is only used
+ // for the type switch below.
+ mraw = "${INTERPOLATED}"
+ }
+
+ result := make(map[string]interface{})
+ computed := false
+ switch m := mraw.(type) {
+ case string:
+ // This is a map which has come out of an interpolated variable, so we
+ // can just get the value directly from config. Values cannot be computed
+ // currently.
+ v, _ := r.Config.Get(k)
+
+ // If this isn't a map[string]interface, it must be computed.
+ mapV, ok := v.(map[string]interface{})
+ if !ok {
+ return FieldReadResult{
+ Exists: true,
+ Computed: true,
+ }, nil
+ }
+
+ // Otherwise we can proceed as usual.
+ for i, iv := range mapV {
+ result[i] = iv
+ }
+ case []interface{}:
+ for i, innerRaw := range m {
+ for ik := range innerRaw.(map[string]interface{}) {
+ key := fmt.Sprintf("%s.%d.%s", k, i, ik)
+ if r.Config.IsComputed(key) {
+ computed = true
+ break
+ }
+
+ v, _ := r.Config.Get(key)
+ result[ik] = v
+ }
+ }
+ case []map[string]interface{}:
+ for i, innerRaw := range m {
+ for ik := range innerRaw {
+ key := fmt.Sprintf("%s.%d.%s", k, i, ik)
+ if r.Config.IsComputed(key) {
+ computed = true
+ break
+ }
+
+ v, _ := r.Config.Get(key)
+ result[ik] = v
+ }
+ }
+ case map[string]interface{}:
+ for ik := range m {
+ key := fmt.Sprintf("%s.%s", k, ik)
+ if r.Config.IsComputed(key) {
+ computed = true
+ break
+ }
+
+ v, _ := r.Config.Get(key)
+ result[ik] = v
+ }
+ default:
+ panic(fmt.Sprintf("unknown type: %#v", mraw))
+ }
+
+ // Note: a conversion failure is treated as the field not existing,
+ // not as a read error.
+ err := mapValuesToPrimitive(result, schema)
+ if err != nil {
+ return FieldReadResult{}, nil
+ }
+
+ var value interface{}
+ if !computed {
+ value = result
+ }
+
+ return FieldReadResult{
+ Value: value,
+ Exists: true,
+ Computed: computed,
+ }, nil
+}
+
+func (r *ConfigFieldReader) readPrimitive(
+ k string, schema *Schema) (FieldReadResult, error) {
+ raw, ok := r.Config.Get(k)
+ if !ok {
+ // Nothing in config, but we might still have a default from the schema
+ var err error
+ raw, err = schema.DefaultValue()
+ if err != nil {
+ return FieldReadResult{}, fmt.Errorf("%s, error loading default: %s", k, err)
+ }
+
+ if raw == nil {
+ return FieldReadResult{}, nil
+ }
+ }
+
+ var result string
+ if err := mapstructure.WeakDecode(raw, &result); err != nil {
+ return FieldReadResult{}, err
+ }
+
+ computed := r.Config.IsComputed(k)
+ returnVal, err := stringToPrimitive(result, computed, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+
+ return FieldReadResult{
+ Value: returnVal,
+ Exists: true,
+ Computed: computed,
+ }, nil
+}
+
+func (r *ConfigFieldReader) readSet(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ indexMap := make(map[string]int)
+ // Create the set that will be our result
+ set := schema.ZeroValue().(*Set)
+
+ raw, err := readListField(&nestedConfigFieldReader{r}, address, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if !raw.Exists {
+ return FieldReadResult{Value: set}, nil
+ }
+
+ // If the list is computed, the set is necessarily computed
+ if raw.Computed {
+ return FieldReadResult{
+ Value: set,
+ Exists: true,
+ Computed: raw.Computed,
+ }, nil
+ }
+
+ // Build up the set from the list elements
+ for i, v := range raw.Value.([]interface{}) {
+ // Check if any of the keys in this item are computed
+ computed := r.hasComputedSubKeys(
+ fmt.Sprintf("%s.%d", strings.Join(address, "."), i), schema)
+
+ code := set.add(v, computed)
+ indexMap[code] = i
+ }
+
+ r.indexMaps[strings.Join(address, ".")] = indexMap
+
+ return FieldReadResult{
+ Value: set,
+ Exists: true,
+ }, nil
+}
+
+// hasComputedSubKeys walks through a schema and returns whether or not the
+// given key contains any subkeys that are computed.
+func (r *ConfigFieldReader) hasComputedSubKeys(key string, schema *Schema) bool {
+ prefix := key + "."
+
+ switch t := schema.Elem.(type) {
+ case *Resource:
+ for k, schema := range t.Schema {
+ if r.Config.IsComputed(prefix + k) {
+ return true
+ }
+
+ if r.hasComputedSubKeys(prefix+k, schema) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// nestedConfigFieldReader is a funny little thing that just wraps a
+// ConfigFieldReader to call readField when ReadField is called so that
+// we don't recalculate the set rewrites in the address, which leads to
+// an infinite loop.
+type nestedConfigFieldReader struct {
+ Reader *ConfigFieldReader
+}
+
+func (r *nestedConfigFieldReader) ReadField(
+ address []string) (FieldReadResult, error) {
+ return r.Reader.readField(address, true)
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
new file mode 100644
index 00000000..16bbae29
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
@@ -0,0 +1,208 @@
+package schema
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/mitchellh/mapstructure"
+)
+
+// DiffFieldReader reads fields out of a diff structure.
+//
+// It also requires access to a Reader that reads fields from the structure
+// that the diff was derived from. This is usually the state. This is required
+// because a diff on its own doesn't have complete data about full objects
+// such as maps.
+//
+// The Source MUST be the data that the diff was derived from. If it isn't,
+// the behavior of this struct is undefined.
+//
+// Reading fields from a DiffFieldReader is identical to reading from
+// Source except the diff will be applied to the end result.
+//
+// The "Exists" field on the result will be set to true if the complete
+// field exists, whether it's from the source, the diff, or a combination of both.
+// It cannot be determined whether a retrieved value is composed of
+// diff elements.
+type DiffFieldReader struct {
+ Diff *terraform.InstanceDiff
+ Source FieldReader
+ Schema map[string]*Schema
+}
+
+func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) {
+ schemaList := addrToSchema(address, r.Schema)
+ if len(schemaList) == 0 {
+ return FieldReadResult{}, nil
+ }
+
+ schema := schemaList[len(schemaList)-1]
+ switch schema.Type {
+ case TypeBool, TypeInt, TypeFloat, TypeString:
+ return r.readPrimitive(address, schema)
+ case TypeList:
+ return readListField(r, address, schema)
+ case TypeMap:
+ return r.readMap(address, schema)
+ case TypeSet:
+ return r.readSet(address, schema)
+ case typeObject:
+ return readObjectField(r, address, schema.Elem.(map[string]*Schema))
+ default:
+ panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
+ }
+}
+
+func (r *DiffFieldReader) readMap(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ result := make(map[string]interface{})
+ resultSet := false
+
+ // First read the map from the underlying source
+ source, err := r.Source.ReadField(address)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if source.Exists {
+ result = source.Value.(map[string]interface{})
+ resultSet = true
+ }
+
+ // Next, read all the elements we have in our diff, and apply
+ // the diff to our result.
+ prefix := strings.Join(address, ".") + "."
+ for k, v := range r.Diff.Attributes {
+ if !strings.HasPrefix(k, prefix) {
+ continue
+ }
+ if strings.HasPrefix(k, prefix+"%") {
+ // Ignore the count field
+ continue
+ }
+
+ resultSet = true
+
+ k = k[len(prefix):]
+ if v.NewRemoved {
+ delete(result, k)
+ continue
+ }
+
+ result[k] = v.New
+ }
+
+ // Note: a conversion failure is treated as the field not existing,
+ // not as a read error.
+ err = mapValuesToPrimitive(result, schema)
+ if err != nil {
+ return FieldReadResult{}, nil
+ }
+
+ var resultVal interface{}
+ if resultSet {
+ resultVal = result
+ }
+
+ return FieldReadResult{
+ Value: resultVal,
+ Exists: resultSet,
+ }, nil
+}
+
+func (r *DiffFieldReader) readPrimitive(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ result, err := r.Source.ReadField(address)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+
+ attrD, ok := r.Diff.Attributes[strings.Join(address, ".")]
+ if !ok {
+ return result, nil
+ }
+
+ var resultVal string
+ if !attrD.NewComputed {
+ resultVal = attrD.New
+ if attrD.NewExtra != nil {
+ result.ValueProcessed = resultVal
+ if err := mapstructure.WeakDecode(attrD.NewExtra, &resultVal); err != nil {
+ return FieldReadResult{}, err
+ }
+ }
+ }
+
+ result.Computed = attrD.NewComputed
+ result.Exists = true
+ result.Value, err = stringToPrimitive(resultVal, false, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+
+ return result, nil
+}
+
+func (r *DiffFieldReader) readSet(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ prefix := strings.Join(address, ".") + "."
+
+ // Create the set that will be our result
+ set := schema.ZeroValue().(*Set)
+
+ // Go through the map and find all the set items
+ for k, d := range r.Diff.Attributes {
+ if d.NewRemoved {
+ // If the field is removed, we always ignore it
+ continue
+ }
+ if !strings.HasPrefix(k, prefix) {
+ continue
+ }
+ if strings.HasSuffix(k, "#") {
+ // Ignore any count field
+ continue
+ }
+
+ // Split the key, since it might be a sub-object like "idx.field"
+ parts := strings.Split(k[len(prefix):], ".")
+ idx := parts[0]
+
+ raw, err := r.ReadField(append(address, idx))
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if !raw.Exists {
+ // This shouldn't happen because we just verified it does exist
+ panic("missing field in set: " + k + "." + idx)
+ }
+
+ set.Add(raw.Value)
+ }
+
+ // Determine if the set "exists". It exists if there are items or if
+ // the diff explicitly wanted it empty.
+ exists := set.Len() > 0
+ if !exists {
+ // We could check if the diff value is "0" here but I think the
+ // existence of "#" on its own is enough to show it existed. This
+ // protects us in the future from the zero value changing from
+ // "0" to "" breaking us (if that were to happen).
+ if _, ok := r.Diff.Attributes[prefix+"#"]; ok {
+ exists = true
+ }
+ }
+
+ if !exists {
+ result, err := r.Source.ReadField(address)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if result.Exists {
+ return result, nil
+ }
+ }
+
+ return FieldReadResult{
+ Value: set,
+ Exists: exists,
+ }, nil
+}
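
A minimal sketch of the layering described above, using the MapFieldReader from this diff as the Source; all names local to the sketch are invented.

    package main

    import (
        "fmt"

        "github.com/hashicorp/terraform/helper/schema"
        "github.com/hashicorp/terraform/terraform"
    )

    func main() {
        s := map[string]*schema.Schema{"name": {Type: schema.TypeString}}

        // The "state" the diff was derived from: name = "a".
        stateReader := &schema.MapFieldReader{
            Schema: s,
            Map:    schema.BasicMapReader(map[string]string{"name": "a"}),
        }

        // The diff changes name to "b".
        diffReader := &schema.DiffFieldReader{
            Diff: &terraform.InstanceDiff{
                Attributes: map[string]*terraform.ResourceAttrDiff{
                    "name": {Old: "a", New: "b"},
                },
            },
            Source: stateReader,
            Schema: s,
        }

        res, err := diffReader.ReadField([]string{"name"})
        fmt.Println(res.Value, err) // b <nil> -- the diff is applied on top of Source
    }
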
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
new file mode 100644
index 00000000..95339810
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
@@ -0,0 +1,232 @@
+package schema
+
+import (
+ "fmt"
+ "strings"
+)
+
+// MapFieldReader reads fields out of an untyped map[string]string to
+// the best of its ability.
+type MapFieldReader struct {
+ Map MapReader
+ Schema map[string]*Schema
+}
+
+func (r *MapFieldReader) ReadField(address []string) (FieldReadResult, error) {
+ k := strings.Join(address, ".")
+ schemaList := addrToSchema(address, r.Schema)
+ if len(schemaList) == 0 {
+ return FieldReadResult{}, nil
+ }
+
+ schema := schemaList[len(schemaList)-1]
+ switch schema.Type {
+ case TypeBool, TypeInt, TypeFloat, TypeString:
+ return r.readPrimitive(address, schema)
+ case TypeList:
+ return readListField(r, address, schema)
+ case TypeMap:
+ return r.readMap(k, schema)
+ case TypeSet:
+ return r.readSet(address, schema)
+ case typeObject:
+ return readObjectField(r, address, schema.Elem.(map[string]*Schema))
+ default:
+ panic(fmt.Sprintf("Unknown type: %s", schema.Type))
+ }
+}
+
+func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
+ result := make(map[string]interface{})
+ resultSet := false
+
+ // If the name of the map field is directly in the map with an
+ // empty string, it means that the map is being deleted, so mark
+ // that it is set.
+ if v, ok := r.Map.Access(k); ok && v == "" {
+ resultSet = true
+ }
+
+ prefix := k + "."
+ r.Map.Range(func(k, v string) bool {
+ if strings.HasPrefix(k, prefix) {
+ resultSet = true
+
+ key := k[len(prefix):]
+ if key != "%" && key != "#" {
+ result[key] = v
+ }
+ }
+
+ return true
+ })
+
+ // Note: a conversion failure is treated as the field not existing,
+ // not as a read error.
+ err := mapValuesToPrimitive(result, schema)
+ if err != nil {
+ return FieldReadResult{}, nil
+ }
+
+ var resultVal interface{}
+ if resultSet {
+ resultVal = result
+ }
+
+ return FieldReadResult{
+ Value: resultVal,
+ Exists: resultSet,
+ }, nil
+}
+
+func (r *MapFieldReader) readPrimitive(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ k := strings.Join(address, ".")
+ result, ok := r.Map.Access(k)
+ if !ok {
+ return FieldReadResult{}, nil
+ }
+
+ returnVal, err := stringToPrimitive(result, false, schema)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+
+ return FieldReadResult{
+ Value: returnVal,
+ Exists: true,
+ }, nil
+}
+
+func (r *MapFieldReader) readSet(
+ address []string, schema *Schema) (FieldReadResult, error) {
+ // Get the number of elements in the list
+ countRaw, err := r.readPrimitive(
+ append(address, "#"), &Schema{Type: TypeInt})
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ if !countRaw.Exists {
+ // No count, means we have no list
+ countRaw.Value = 0
+ }
+
+ // Create the set that will be our result
+ set := schema.ZeroValue().(*Set)
+
+ // If we have an empty list, then return an empty list
+ if countRaw.Computed || countRaw.Value.(int) == 0 {
+ return FieldReadResult{
+ Value: set,
+ Exists: countRaw.Exists,
+ Computed: countRaw.Computed,
+ }, nil
+ }
+
+ // Go through the map and find all the set items
+ prefix := strings.Join(address, ".") + "."
+ countExpected := countRaw.Value.(int)
+ countActual := make(map[string]struct{})
+ completed := r.Map.Range(func(k, _ string) bool {
+ if !strings.HasPrefix(k, prefix) {
+ return true
+ }
+ if strings.HasPrefix(k, prefix+"#") {
+ // Ignore the count field
+ return true
+ }
+
+ // Split the key, since it might be a sub-object like "idx.field"
+ parts := strings.Split(k[len(prefix):], ".")
+ idx := parts[0]
+
+ var raw FieldReadResult
+ raw, err = r.ReadField(append(address, idx))
+ if err != nil {
+ return false
+ }
+ if !raw.Exists {
+ // This shouldn't happen because we just verified it does exist
+ panic("missing field in set: " + k + "." + idx)
+ }
+
+ set.Add(raw.Value)
+
+ // Due to the way multimap readers work, if we've seen the number
+ // of fields we expect, then exit so that we don't read later values.
+ // For example: the "set" map might have "ports.#", "ports.0", and
+ // "ports.1", but the "state" map might have those plus "ports.2".
+ // We don't want "ports.2"
+ countActual[idx] = struct{}{}
+ if len(countActual) >= countExpected {
+ return false
+ }
+
+ return true
+ })
+ if !completed && err != nil {
+ return FieldReadResult{}, err
+ }
+
+ return FieldReadResult{
+ Value: set,
+ Exists: true,
+ }, nil
+}
+
+// MapReader is an interface that is given to MapFieldReader for accessing
+// a "map". This can be used to have alternate implementations. For a basic
+// map[string]string, use BasicMapReader.
+type MapReader interface {
+ Access(string) (string, bool)
+ Range(func(string, string) bool) bool
+}
+
+// BasicMapReader implements MapReader for a single map.
+type BasicMapReader map[string]string
+
+func (r BasicMapReader) Access(k string) (string, bool) {
+ v, ok := r[k]
+ return v, ok
+}
+
+func (r BasicMapReader) Range(f func(string, string) bool) bool {
+ for k, v := range r {
+ if cont := f(k, v); !cont {
+ return false
+ }
+ }
+
+ return true
+}
+
+// MultiMapReader reads over multiple maps, preferring keys that are
+// found earlier (lower index) over those found later (higher index).
+type MultiMapReader []map[string]string
+
+func (r MultiMapReader) Access(k string) (string, bool) {
+ for _, m := range r {
+ if v, ok := m[k]; ok {
+ return v, ok
+ }
+ }
+
+ return "", false
+}
+
+func (r MultiMapReader) Range(f func(string, string) bool) bool {
+ done := make(map[string]struct{})
+ for _, m := range r {
+ for k, v := range m {
+ if _, ok := done[k]; ok {
+ continue
+ }
+
+ if cont := f(k, v); !cont {
+ return false
+ }
+
+ done[k] = struct{}{}
+ }
+ }
+
+ return true
+}
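
A small sketch of the precedence rules: Access prefers the first map that contains a key, and Range visits each key only once.

    package main

    import (
        "fmt"

        "github.com/hashicorp/terraform/helper/schema"
    )

    func main() {
        r := schema.MultiMapReader{
            {"name": "from-first"},
            {"name": "from-second", "extra": "x"},
        }

        v, ok := r.Access("name")
        fmt.Println(v, ok) // from-first true

        r.Range(func(k, v string) bool {
            fmt.Println(k, "=", v) // "name" is visited once, with the first value
            return true
        })
    }
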
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go
new file mode 100644
index 00000000..89ad3a86
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go
@@ -0,0 +1,63 @@
+package schema
+
+import (
+ "fmt"
+)
+
+// MultiLevelFieldReader reads from other field readers,
+// merging their results along the way in a specific order. You can specify
+// "levels" and name them in order to read only an exact level or up to
+// a specific level.
+//
+// This is useful for saying things such as "read the field from the state
+// and config and merge them" or "read the latest value of the field".
+type MultiLevelFieldReader struct {
+ Readers map[string]FieldReader
+ Levels []string
+}
+
+func (r *MultiLevelFieldReader) ReadField(address []string) (FieldReadResult, error) {
+ return r.ReadFieldMerge(address, r.Levels[len(r.Levels)-1])
+}
+
+func (r *MultiLevelFieldReader) ReadFieldExact(
+ address []string, level string) (FieldReadResult, error) {
+ reader, ok := r.Readers[level]
+ if !ok {
+ return FieldReadResult{}, fmt.Errorf(
+ "Unknown reader level: %s", level)
+ }
+
+ result, err := reader.ReadField(address)
+ if err != nil {
+ return FieldReadResult{}, fmt.Errorf(
+ "Error reading level %s: %s", level, err)
+ }
+
+ return result, nil
+}
+
+func (r *MultiLevelFieldReader) ReadFieldMerge(
+ address []string, level string) (FieldReadResult, error) {
+ var result FieldReadResult
+ for _, l := range r.Levels {
+ if r, ok := r.Readers[l]; ok {
+ out, err := r.ReadField(address)
+ if err != nil {
+ return FieldReadResult{}, fmt.Errorf(
+ "Error reading level %s: %s", l, err)
+ }
+
+ // TODO: computed
+ if out.Exists {
+ result = out
+ }
+ }
+
+ if l == level {
+ break
+ }
+ }
+
+ return result, nil
+}
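
A minimal sketch of merged reads across two levels, using the MapFieldReader from this diff; the level names are arbitrary.

    package main

    import (
        "fmt"

        "github.com/hashicorp/terraform/helper/schema"
    )

    func main() {
        s := map[string]*schema.Schema{"name": {Type: schema.TypeString}}
        r := &schema.MultiLevelFieldReader{
            Levels: []string{"state", "config"},
            Readers: map[string]schema.FieldReader{
                "state": &schema.MapFieldReader{
                    Schema: s,
                    Map:    schema.BasicMapReader(map[string]string{"name": "old"}),
                },
                "config": &schema.MapFieldReader{
                    Schema: s,
                    Map:    schema.BasicMapReader(map[string]string{"name": "new"}),
                },
            },
        }

        // Reads each level in order up to "config"; the last level where
        // the field exists wins.
        res, err := r.ReadFieldMerge([]string{"name"}, "config")
        fmt.Println(res.Value, err) // new <nil>
    }
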
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go
new file mode 100644
index 00000000..9abc41b5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go
@@ -0,0 +1,8 @@
+package schema
+
+// FieldWriters are responsible for writing fields by address into
+// a proper typed representation. ResourceData uses this to write new data
+// into existing sources.
+type FieldWriter interface {
+ WriteField([]string, interface{}) error
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
new file mode 100644
index 00000000..689ed8d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
@@ -0,0 +1,319 @@
+package schema
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/mitchellh/mapstructure"
+)
+
+// MapFieldWriter writes data into a single map[string]string structure.
+type MapFieldWriter struct {
+ Schema map[string]*Schema
+
+ lock sync.Mutex
+ result map[string]string
+}
+
+// Map returns the underlying map that is being written to.
+func (w *MapFieldWriter) Map() map[string]string {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ if w.result == nil {
+ w.result = make(map[string]string)
+ }
+
+ return w.result
+}
+
+func (w *MapFieldWriter) unsafeWriteField(addr string, value string) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ if w.result == nil {
+ w.result = make(map[string]string)
+ }
+
+ w.result[addr] = value
+}
+
+func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ if w.result == nil {
+ w.result = make(map[string]string)
+ }
+
+ schemaList := addrToSchema(addr, w.Schema)
+ if len(schemaList) == 0 {
+ return fmt.Errorf("Invalid address to set: %#v", addr)
+ }
+
+ // If we're setting anything below the root of a list, map, or set,
+ // then disallow it.
+ for _, schema := range schemaList[:len(schemaList)-1] {
+ if schema.Type == TypeList {
+ return fmt.Errorf(
+ "%s: can only set full list",
+ strings.Join(addr, "."))
+ }
+
+ if schema.Type == TypeMap {
+ return fmt.Errorf(
+ "%s: can only set full map",
+ strings.Join(addr, "."))
+ }
+
+ if schema.Type == TypeSet {
+ return fmt.Errorf(
+ "%s: can only set full set",
+ strings.Join(addr, "."))
+ }
+ }
+
+ return w.set(addr, value)
+}
+
+func (w *MapFieldWriter) set(addr []string, value interface{}) error {
+ schemaList := addrToSchema(addr, w.Schema)
+ if len(schemaList) == 0 {
+ return fmt.Errorf("Invalid address to set: %#v", addr)
+ }
+
+ schema := schemaList[len(schemaList)-1]
+ switch schema.Type {
+ case TypeBool, TypeInt, TypeFloat, TypeString:
+ return w.setPrimitive(addr, value, schema)
+ case TypeList:
+ return w.setList(addr, value, schema)
+ case TypeMap:
+ return w.setMap(addr, value, schema)
+ case TypeSet:
+ return w.setSet(addr, value, schema)
+ case typeObject:
+ return w.setObject(addr, value, schema)
+ default:
+ panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
+ }
+}
+
+func (w *MapFieldWriter) setList(
+ addr []string,
+ v interface{},
+ schema *Schema) error {
+ k := strings.Join(addr, ".")
+ setElement := func(idx string, value interface{}) error {
+ addrCopy := make([]string, len(addr), len(addr)+1)
+ copy(addrCopy, addr)
+ return w.set(append(addrCopy, idx), value)
+ }
+
+ var vs []interface{}
+ if err := mapstructure.Decode(v, &vs); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+
+ // Set the entire list.
+ var err error
+ for i, elem := range vs {
+ is := strconv.FormatInt(int64(i), 10)
+ err = setElement(is, elem)
+ if err != nil {
+ break
+ }
+ }
+ if err != nil {
+ for i := range vs {
+ is := strconv.FormatInt(int64(i), 10)
+ setElement(is, nil)
+ }
+
+ return err
+ }
+
+ w.result[k+".#"] = strconv.FormatInt(int64(len(vs)), 10)
+ return nil
+}
+
+func (w *MapFieldWriter) setMap(
+ addr []string,
+ value interface{},
+ schema *Schema) error {
+ k := strings.Join(addr, ".")
+ v := reflect.ValueOf(value)
+ vs := make(map[string]interface{})
+
+ if value == nil {
+ // The empty string here means the map is removed.
+ w.result[k] = ""
+ return nil
+ }
+
+ if v.Kind() != reflect.Map {
+ return fmt.Errorf("%s: must be a map", k)
+ }
+ if v.Type().Key().Kind() != reflect.String {
+ return fmt.Errorf("%s: keys must be strings", k)
+ }
+ for _, mk := range v.MapKeys() {
+ mv := v.MapIndex(mk)
+ vs[mk.String()] = mv.Interface()
+ }
+
+ // Remove the pure key since we're setting the full map value
+ delete(w.result, k)
+
+ // Set each subkey
+ addrCopy := make([]string, len(addr), len(addr)+1)
+ copy(addrCopy, addr)
+ for subKey, v := range vs {
+ if err := w.set(append(addrCopy, subKey), v); err != nil {
+ return err
+ }
+ }
+
+ // Set the count
+ w.result[k+".%"] = strconv.Itoa(len(vs))
+
+ return nil
+}
+
+func (w *MapFieldWriter) setObject(
+ addr []string,
+ value interface{},
+ schema *Schema) error {
+ // Set the entire object. First decode into a proper structure
+ var v map[string]interface{}
+ if err := mapstructure.Decode(value, &v); err != nil {
+ return fmt.Errorf("%s: %s", strings.Join(addr, "."), err)
+ }
+
+ // Make space for additional elements in the address
+ addrCopy := make([]string, len(addr), len(addr)+1)
+ copy(addrCopy, addr)
+
+ // Set each element in turn
+ var err error
+ for k1, v1 := range v {
+ if err = w.set(append(addrCopy, k1), v1); err != nil {
+ break
+ }
+ }
+ if err != nil {
+ for k1 := range v {
+ w.set(append(addrCopy, k1), nil)
+ }
+ }
+
+ return err
+}
+
+func (w *MapFieldWriter) setPrimitive(
+ addr []string,
+ v interface{},
+ schema *Schema) error {
+ k := strings.Join(addr, ".")
+
+ if v == nil {
+ // The empty string here means the value is removed.
+ w.result[k] = ""
+ return nil
+ }
+
+ var set string
+ switch schema.Type {
+ case TypeBool:
+ var b bool
+ if err := mapstructure.Decode(v, &b); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+
+ set = strconv.FormatBool(b)
+ case TypeString:
+ if err := mapstructure.Decode(v, &set); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+ case TypeInt:
+ var n int
+ if err := mapstructure.Decode(v, &n); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+ set = strconv.FormatInt(int64(n), 10)
+ case TypeFloat:
+ var n float64
+ if err := mapstructure.Decode(v, &n); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+ set = strconv.FormatFloat(float64(n), 'G', -1, 64)
+ default:
+ return fmt.Errorf("Unknown type: %#v", schema.Type)
+ }
+
+ w.result[k] = set
+ return nil
+}
+
+func (w *MapFieldWriter) setSet(
+ addr []string,
+ value interface{},
+ schema *Schema) error {
+ addrCopy := make([]string, len(addr), len(addr)+1)
+ copy(addrCopy, addr)
+ k := strings.Join(addr, ".")
+
+ if value == nil {
+ w.result[k+".#"] = "0"
+ return nil
+ }
+
+ // If it is a slice, then we have to turn it into a *Set so that
+ // we get the proper order back based on the hash code.
+ if v := reflect.ValueOf(value); v.Kind() == reflect.Slice {
+ // Build a temp *ResourceData to use for the conversion
+ tempSchema := *schema
+ tempSchema.Type = TypeList
+ tempSchemaMap := map[string]*Schema{addr[0]: &tempSchema}
+ tempW := &MapFieldWriter{Schema: tempSchemaMap}
+
+ // Set the entire list, this lets us get sane values out of it
+ if err := tempW.WriteField(addr, value); err != nil {
+ return err
+ }
+
+ // Build the set by going over the list items in order and
+ // hashing them into the set. The reason we go over the list and
+ // not the `value` directly is because this forces all types
+ // to become []interface{} (generic) instead of []string, which
+ // most hash functions are expecting.
+ s := schema.ZeroValue().(*Set)
+ tempR := &MapFieldReader{
+ Map: BasicMapReader(tempW.Map()),
+ Schema: tempSchemaMap,
+ }
+ for i := 0; i < v.Len(); i++ {
+ is := strconv.FormatInt(int64(i), 10)
+ result, err := tempR.ReadField(append(addrCopy, is))
+ if err != nil {
+ return err
+ }
+ if !result.Exists {
+ panic("set item just set doesn't exist")
+ }
+
+ s.Add(result.Value)
+ }
+
+ value = s
+ }
+
+ for code, elem := range value.(*Set).m {
+ if err := w.set(append(addrCopy, code), elem); err != nil {
+ return err
+ }
+ }
+
+ w.result[k+".#"] = strconv.Itoa(value.(*Set).Len())
+ return nil
+}
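
A minimal sketch of the writer producing the same flat encoding the readers consume; the "ports" schema is invented for the example.

    package main

    import (
        "fmt"

        "github.com/hashicorp/terraform/helper/schema"
    )

    func main() {
        w := &schema.MapFieldWriter{
            Schema: map[string]*schema.Schema{
                "ports": {
                    Type: schema.TypeList,
                    Elem: &schema.Schema{Type: schema.TypeInt},
                },
            },
        }

        if err := w.WriteField([]string{"ports"}, []interface{}{80, 443}); err != nil {
            panic(err)
        }

        // The result holds "ports.#" = "2", "ports.0" = "80", "ports.1" = "443".
        fmt.Println(w.Map())
    }
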
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
new file mode 100644
index 00000000..3a976293
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
@@ -0,0 +1,36 @@
+// Code generated by "stringer -type=getSource resource_data_get_source.go"; DO NOT EDIT.
+
+package schema
+
+import "fmt"
+
+const (
+ _getSource_name_0 = "getSourceStategetSourceConfig"
+ _getSource_name_1 = "getSourceDiff"
+ _getSource_name_2 = "getSourceSet"
+ _getSource_name_3 = "getSourceLevelMaskgetSourceExact"
+)
+
+var (
+ _getSource_index_0 = [...]uint8{0, 14, 29}
+ _getSource_index_1 = [...]uint8{0, 13}
+ _getSource_index_2 = [...]uint8{0, 12}
+ _getSource_index_3 = [...]uint8{0, 18, 32}
+)
+
+func (i getSource) String() string {
+ switch {
+ case 1 <= i && i <= 2:
+ i -= 1
+ return _getSource_name_0[_getSource_index_0[i]:_getSource_index_0[i+1]]
+ case i == 4:
+ return _getSource_name_1
+ case i == 8:
+ return _getSource_name_2
+ case 15 <= i && i <= 16:
+ i -= 15
+ return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]]
+ default:
+ return fmt.Sprintf("getSource(%d)", i)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
new file mode 100644
index 00000000..d52d2f5f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
@@ -0,0 +1,400 @@
+package schema
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Provider represents a resource provider in Terraform, and properly
+// implements all of the ResourceProvider API.
+//
+// By defining a schema for the configuration of the provider, the
+// map of supporting resources, and a configuration function, the schema
+// framework takes over and handles all the provider operations for you.
+//
+// After defining the provider structure, it is unlikely that you'll require any
+// of the methods on Provider itself.
+type Provider struct {
+ // Schema is the schema for the configuration of this provider. If this
+ // provider has no configuration, this can be omitted.
+ //
+ // The keys of this map are the configuration keys, and the value is
+ // the schema describing the value of the configuration.
+ Schema map[string]*Schema
+
+ // ResourcesMap is the list of available resources that this provider
+ // can manage, along with their Resource structure defining their
+ // own schemas and CRUD operations.
+ //
+ // Provider automatically handles routing operations such as Apply,
+ // Diff, etc. to the proper resource.
+ ResourcesMap map[string]*Resource
+
+ // DataSourcesMap is the collection of available data sources that
+ // this provider implements, with a Resource instance defining
+ // the schema and Read operation of each.
+ //
+ // Resource instances for data sources must have a Read function
+ // and must *not* implement Create, Update or Delete.
+ DataSourcesMap map[string]*Resource
+
+ // ConfigureFunc is a function for configuring the provider. If the
+ // provider doesn't need to be configured, this can be omitted.
+ //
+ // See the ConfigureFunc documentation for more information.
+ ConfigureFunc ConfigureFunc
+
+ // MetaReset is called by TestReset to reset any state stored in the meta
+ // interface. This is especially important if the StopContext is stored by
+ // the provider.
+ MetaReset func() error
+
+ meta interface{}
+
+ // a mutex is required because TestReset can directly replace the stopCtx
+ stopMu sync.Mutex
+ stopCtx context.Context
+ stopCtxCancel context.CancelFunc
+ stopOnce sync.Once
+}
+
+// ConfigureFunc is the function used to configure a Provider.
+//
+// The interface{} value returned by this function is stored and passed into
+// the subsequent resources as the meta parameter. This return value is
+// usually used to pass along a configured API client, a configuration
+// structure, etc.
+type ConfigureFunc func(*ResourceData) (interface{}, error)
+
+// InternalValidate should be called to validate the structure
+// of the provider.
+//
+// This should be called in a unit test for any provider to verify
+// before release that a provider is properly configured for use with
+// this library.
+func (p *Provider) InternalValidate() error {
+ if p == nil {
+ return errors.New("provider is nil")
+ }
+
+ var validationErrors error
+ sm := schemaMap(p.Schema)
+ if err := sm.InternalValidate(sm); err != nil {
+ validationErrors = multierror.Append(validationErrors, err)
+ }
+
+ for k, r := range p.ResourcesMap {
+ if err := r.InternalValidate(nil, true); err != nil {
+ validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err))
+ }
+ }
+
+ for k, r := range p.DataSourcesMap {
+ if err := r.InternalValidate(nil, false); err != nil {
+ validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err))
+ }
+ }
+
+ return validationErrors
+}
+
+// Meta returns the metadata associated with this provider that was
+// returned by the Configure call. It will be nil until Configure is called.
+func (p *Provider) Meta() interface{} {
+ return p.meta
+}
+
+// SetMeta can be used to forcefully set the Meta object of the provider.
+// Note that if Configure is called the return value will override anything
+// set here.
+func (p *Provider) SetMeta(v interface{}) {
+ p.meta = v
+}
+
+// Stopped reports whether the provider has been stopped or not.
+func (p *Provider) Stopped() bool {
+ ctx := p.StopContext()
+ select {
+ case <-ctx.Done():
+ return true
+ default:
+ return false
+ }
+}
+
+// StopContext returns a context that is canceled once the provider is stopped.
+func (p *Provider) StopContext() context.Context {
+ p.stopOnce.Do(p.stopInit)
+
+ p.stopMu.Lock()
+ defer p.stopMu.Unlock()
+
+ return p.stopCtx
+}
+
+func (p *Provider) stopInit() {
+ p.stopMu.Lock()
+ defer p.stopMu.Unlock()
+
+ p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background())
+}
+
+// Stop implementation of terraform.ResourceProvider interface.
+func (p *Provider) Stop() error {
+ p.stopOnce.Do(p.stopInit)
+
+ p.stopMu.Lock()
+ defer p.stopMu.Unlock()
+
+ p.stopCtxCancel()
+ return nil
+}
+
+// TestReset resets any state stored in the Provider, and will call TestReset
+// on Meta if it implements the TestProvider interface.
+// This may be used to reset the schema.Provider at the start of a test, and is
+// automatically called by resource.Test.
+func (p *Provider) TestReset() error {
+ p.stopInit()
+ if p.MetaReset != nil {
+ return p.MetaReset()
+ }
+ return nil
+}
+
+// Input implementation of terraform.ResourceProvider interface.
+func (p *Provider) Input(
+ input terraform.UIInput,
+ c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+ return schemaMap(p.Schema).Input(input, c)
+}
+
+// Validate implementation of terraform.ResourceProvider interface.
+func (p *Provider) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ if err := p.InternalValidate(); err != nil {
+ return nil, []error{fmt.Errorf(
+ "Internal validation of the provider failed! This is always a bug\n"+
+ "with the provider itself, and not a user issue. Please report\n"+
+ "this bug:\n\n%s", err)}
+ }
+
+ return schemaMap(p.Schema).Validate(c)
+}
+
+// ValidateResource implementation of terraform.ResourceProvider interface.
+func (p *Provider) ValidateResource(
+ t string, c *terraform.ResourceConfig) ([]string, []error) {
+ r, ok := p.ResourcesMap[t]
+ if !ok {
+ return nil, []error{fmt.Errorf(
+ "Provider doesn't support resource: %s", t)}
+ }
+
+ return r.Validate(c)
+}
+
+// Configure implementation of terraform.ResourceProvider interface.
+func (p *Provider) Configure(c *terraform.ResourceConfig) error {
+ // No configuration
+ if p.ConfigureFunc == nil {
+ return nil
+ }
+
+ sm := schemaMap(p.Schema)
+
+ // Get a ResourceData for this configuration. To do this, we actually
+ // generate an intermediary "diff" although that is never exposed.
+ diff, err := sm.Diff(nil, c)
+ if err != nil {
+ return err
+ }
+
+ data, err := sm.Data(nil, diff)
+ if err != nil {
+ return err
+ }
+
+ meta, err := p.ConfigureFunc(data)
+ if err != nil {
+ return err
+ }
+
+ p.meta = meta
+ return nil
+}
+
+// Apply implementation of terraform.ResourceProvider interface.
+func (p *Provider) Apply(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
+ r, ok := p.ResourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown resource type: %s", info.Type)
+ }
+
+ return r.Apply(s, d, p.meta)
+}
+
+// Diff implementation of terraform.ResourceProvider interface.
+func (p *Provider) Diff(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+ r, ok := p.ResourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown resource type: %s", info.Type)
+ }
+
+ return r.Diff(s, c)
+}
+
+// Refresh implementation of terraform.ResourceProvider interface.
+func (p *Provider) Refresh(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState) (*terraform.InstanceState, error) {
+ r, ok := p.ResourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown resource type: %s", info.Type)
+ }
+
+ return r.Refresh(s, p.meta)
+}
+
+// Resources implementation of terraform.ResourceProvider interface.
+func (p *Provider) Resources() []terraform.ResourceType {
+ keys := make([]string, 0, len(p.ResourcesMap))
+ for k := range p.ResourcesMap {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ result := make([]terraform.ResourceType, 0, len(keys))
+ for _, k := range keys {
+ resource := p.ResourcesMap[k]
+
+ // This isn't really possible (it'd fail InternalValidate), but
+ // we do it anyway to avoid a panic.
+ if resource == nil {
+ resource = &Resource{}
+ }
+
+ result = append(result, terraform.ResourceType{
+ Name: k,
+ Importable: resource.Importer != nil,
+ })
+ }
+
+ return result
+}
+
+func (p *Provider) ImportState(
+ info *terraform.InstanceInfo,
+ id string) ([]*terraform.InstanceState, error) {
+ // Find the resource
+ r, ok := p.ResourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown resource type: %s", info.Type)
+ }
+
+ // If it doesn't support import, error
+ if r.Importer == nil {
+ return nil, fmt.Errorf("resource %s doesn't support import", info.Type)
+ }
+
+ // Create the data
+ data := r.Data(nil)
+ data.SetId(id)
+ data.SetType(info.Type)
+
+ // Call the import function
+ results := []*ResourceData{data}
+ if r.Importer.State != nil {
+ var err error
+ results, err = r.Importer.State(data, p.meta)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Convert the results to InstanceState values and return it
+ states := make([]*terraform.InstanceState, len(results))
+ for i, r := range results {
+ states[i] = r.State()
+ }
+
+ // Verify that all are non-nil. If there are any nil the error
+ // isn't obvious so we circumvent that with a friendlier error.
+ for _, s := range states {
+ if s == nil {
+ return nil, fmt.Errorf(
+ "nil entry in ImportState results. This is always a bug with\n" +
+ "the resource that is being imported. Please report this as\n" +
+ "a bug to Terraform.")
+ }
+ }
+
+ return states, nil
+}
+
+// ValidateDataSource implementation of terraform.ResourceProvider interface.
+func (p *Provider) ValidateDataSource(
+ t string, c *terraform.ResourceConfig) ([]string, []error) {
+ r, ok := p.DataSourcesMap[t]
+ if !ok {
+ return nil, []error{fmt.Errorf(
+ "Provider doesn't support data source: %s", t)}
+ }
+
+ return r.Validate(c)
+}
+
+// ReadDataDiff implementation of terraform.ResourceProvider interface.
+func (p *Provider) ReadDataDiff(
+ info *terraform.InstanceInfo,
+ c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+
+ r, ok := p.DataSourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown data source: %s", info.Type)
+ }
+
+ return r.Diff(nil, c)
+}
+
+// ReadDataApply implementation of terraform.ResourceProvider interface.
+func (p *Provider) ReadDataApply(
+ info *terraform.InstanceInfo,
+ d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
+
+ r, ok := p.DataSourcesMap[info.Type]
+ if !ok {
+ return nil, fmt.Errorf("unknown data source: %s", info.Type)
+ }
+
+ return r.ReadDataApply(d, p.meta)
+}
+
+// DataSources implementation of terraform.ResourceProvider interface.
+func (p *Provider) DataSources() []terraform.DataSource {
+ keys := make([]string, 0, len(p.DataSourcesMap))
+ for k := range p.DataSourcesMap {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ result := make([]terraform.DataSource, 0, len(keys))
+ for _, k := range keys {
+ result = append(result, terraform.DataSource{
+ Name: k,
+ })
+ }
+
+ return result
+}
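+
+// Illustrative sketch, not part of the upstream source: how a plugin author
+// might assemble a Provider from the helpers above. The resource name and
+// CRUD wiring are hypothetical placeholders.
+func exampleProvider() *Provider {
+ return &Provider{
+ ResourcesMap: map[string]*Resource{
+ "example_thing": {
+ Create: Noop,
+ Read: Noop,
+ Delete: RemoveFromState,
+ },
+ },
+ }
+}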
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
new file mode 100644
index 00000000..c1564a21
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
@@ -0,0 +1,180 @@
+package schema
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Provisioner represents a resource provisioner in Terraform and properly
+// implements all of the ResourceProvisioner API.
+//
+// This higher level structure makes it much easier to implement a new or
+// custom provisioner for Terraform.
+//
+// The function callbacks for this structure are all passed a context object.
+// This context object has a number of pre-defined values that can be accessed
+// via the global functions defined in context.go.
+type Provisioner struct {
+ // ConnSchema is the schema for the connection settings for this
+ // provisioner.
+ //
+ // The keys of this map are the configuration keys, and the value is
+ // the schema describing the value of the configuration.
+ //
+ // NOTE: The value of connection keys can only be strings for now.
+ ConnSchema map[string]*Schema
+
+ // Schema is the schema for the usage of this provisioner.
+ //
+ // The keys of this map are the configuration keys, and the value is
+ // the schema describing the value of the configuration.
+ Schema map[string]*Schema
+
+ // ApplyFunc is the function for executing the provisioner. This is required.
+ // It is given a context. See the Provisioner struct docs for more
+ // information.
+ ApplyFunc func(ctx context.Context) error
+
+ stopCtx context.Context
+ stopCtxCancel context.CancelFunc
+ stopOnce sync.Once
+}
+
+// Keys that can be used to access data in the context parameters for
+// Provisioners.
+var (
+ connDataInvalid = contextKey("data invalid")
+
+ // This returns a *ResourceData for the connection information.
+ // Guaranteed to never be nil.
+ ProvConnDataKey = contextKey("provider conn data")
+
+ // This returns a *ResourceData for the config information.
+ // Guaranteed to never be nil.
+ ProvConfigDataKey = contextKey("provider config data")
+
+ // This returns a terraform.UIOutput. Guaranteed to never be nil.
+ ProvOutputKey = contextKey("provider output")
+
+ // This returns the raw InstanceState passed to Apply. Guaranteed to
+ // be set, but may be nil.
+ ProvRawStateKey = contextKey("provider raw state")
+)
+
+// InternalValidate should be called to validate the structure
+// of the provisioner.
+//
+// This should be called in a unit test to verify before release that this
+// structure is properly configured for use.
+func (p *Provisioner) InternalValidate() error {
+ if p == nil {
+ return errors.New("provisioner is nil")
+ }
+
+ var validationErrors error
+ {
+ sm := schemaMap(p.ConnSchema)
+ if err := sm.InternalValidate(sm); err != nil {
+ validationErrors = multierror.Append(validationErrors, err)
+ }
+ }
+
+ {
+ sm := schemaMap(p.Schema)
+ if err := sm.InternalValidate(sm); err != nil {
+ validationErrors = multierror.Append(validationErrors, err)
+ }
+ }
+
+ if p.ApplyFunc == nil {
+ validationErrors = multierror.Append(validationErrors, fmt.Errorf(
+ "ApplyFunc must not be nil"))
+ }
+
+ return validationErrors
+}
+
+// StopContext returns a context that checks whether a provisioner is stopped.
+func (p *Provisioner) StopContext() context.Context {
+ p.stopOnce.Do(p.stopInit)
+ return p.stopCtx
+}
+
+func (p *Provisioner) stopInit() {
+ p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background())
+}
+
+// Stop implementation of terraform.ResourceProvisioner interface.
+func (p *Provisioner) Stop() error {
+ p.stopOnce.Do(p.stopInit)
+ p.stopCtxCancel()
+ return nil
+}
+
+func (p *Provisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ return schemaMap(p.Schema).Validate(c)
+}
+
+// Apply implementation of terraform.ResourceProvisioner interface.
+func (p *Provisioner) Apply(
+ o terraform.UIOutput,
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig) error {
+ var connData, configData *ResourceData
+
+ {
+ // We first need to turn the connection information into a
+ // terraform.ResourceConfig so that we can use that type to more
+ // easily build a ResourceData structure. We do this by simply treating
+ // the conn info as configuration input.
+ raw := make(map[string]interface{})
+ if s != nil {
+ for k, v := range s.Ephemeral.ConnInfo {
+ raw[k] = v
+ }
+ }
+
+ c, err := config.NewRawConfig(raw)
+ if err != nil {
+ return err
+ }
+
+ sm := schemaMap(p.ConnSchema)
+ diff, err := sm.Diff(nil, terraform.NewResourceConfig(c))
+ if err != nil {
+ return err
+ }
+ connData, err = sm.Data(nil, diff)
+ if err != nil {
+ return err
+ }
+ }
+
+ {
+ // Build the configuration data. Doing this requires making a "diff"
+ // even though that's never used. We use that just to get the correct types.
+ configMap := schemaMap(p.Schema)
+ diff, err := configMap.Diff(nil, c)
+ if err != nil {
+ return err
+ }
+ configData, err = configMap.Data(nil, diff)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Build the context and call the function
+ ctx := p.StopContext()
+ ctx = context.WithValue(ctx, ProvConnDataKey, connData)
+ ctx = context.WithValue(ctx, ProvConfigDataKey, configData)
+ ctx = context.WithValue(ctx, ProvOutputKey, o)
+ ctx = context.WithValue(ctx, ProvRawStateKey, s)
+ return p.ApplyFunc(ctx)
+}
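+
+// Illustrative sketch, not part of the upstream source: a Provisioner whose
+// ApplyFunc pulls its inputs back out of the context assembled in Apply
+// above and honors the stop context. The "command" key is a hypothetical
+// placeholder.
+func exampleProvisioner() *Provisioner {
+ return &Provisioner{
+ ApplyFunc: func(ctx context.Context) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ data := ctx.Value(ProvConfigDataKey).(*ResourceData)
+ output := ctx.Value(ProvOutputKey).(terraform.UIOutput)
+ output.Output(fmt.Sprintf("command = %v", data.Get("command")))
+ return nil
+ },
+ }
+}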
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
new file mode 100644
index 00000000..c8105588
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
@@ -0,0 +1,478 @@
+package schema
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "strconv"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Resource represents a thing in Terraform that has a set of configurable
+// attributes and a lifecycle (create, read, update, delete).
+//
+// The Resource schema is an abstraction that allows provider writers to
+// worry only about CRUD operations while off-loading validation, diff
+// generation, etc. to this higher level library.
+//
+// In spite of the name, this struct is not used only for terraform resources,
+// but also for data sources. In the case of data sources, the Create,
+// Update and Delete functions must not be provided.
+type Resource struct {
+ // Schema is the schema for the configuration of this resource.
+ //
+ // The keys of this map are the configuration keys, and the values
+ // describe the schema of the configuration value.
+ //
+ // The schema is used to represent both configurable data as well
+ // as data that might be computed in the process of creating this
+ // resource.
+ Schema map[string]*Schema
+
+ // SchemaVersion is the version number for this resource's Schema
+ // definition. The current SchemaVersion stored in the state for each
+ // resource. Provider authors can increment this version number
+ // when Schema semantics change. If the State's SchemaVersion is less than
+ // the current SchemaVersion, the InstanceState is yielded to the
+ // MigrateState callback, where the provider can make whatever changes it
+ // needs to update the state to be compatible to the latest version of the
+ // Schema.
+ //
+ // When unset, SchemaVersion defaults to 0, so provider authors can start
+ // their versioning at any integer >= 1.
+ SchemaVersion int
+
+ // MigrateState is responsible for updating an InstanceState with an old
+ // version to the format expected by the current version of the Schema.
+ //
+ // It is called during Refresh if the State's stored SchemaVersion is less
+ // than the current SchemaVersion of the Resource.
+ //
+ // The function is yielded the state's stored SchemaVersion and a pointer to
+ // the InstanceState that needs updating, as well as the configured
+ // provider's configured meta interface{}, in case the migration process
+ // needs to make any remote API calls.
+ MigrateState StateMigrateFunc
+
+ // The functions below are the CRUD operations for this resource.
+ //
+ // The only optional operation is Update. If Update is not implemented,
+ // then updates will not be supported for this resource.
+ //
+ // The ResourceData parameter in the functions below are used to
+ // query configuration and changes for the resource as well as to set
+ // the ID, computed data, etc.
+ //
+ // The interface{} parameter is the result of the ConfigureFunc in
+ // the provider for this resource. If the provider does not define
+ // a ConfigureFunc, this will be nil. This parameter should be used
+ // to store API clients, configuration structures, etc.
+ //
+ // If any errors occur during each of the operation, an error should be
+ // returned. If a resource was partially updated, be careful to enable
+ // partial state mode for ResourceData and use it accordingly.
+ //
+ // Exists is a function that is called to check if a resource still
+ // exists. If this returns false, then this will affect the diff
+ // accordingly. If this function isn't set, it will not be called. It
+ // is highly recommended to set it. The *ResourceData passed to Exists
+ // should _not_ be modified.
+ Create CreateFunc
+ Read ReadFunc
+ Update UpdateFunc
+ Delete DeleteFunc
+ Exists ExistsFunc
+
+ // Importer is the ResourceImporter implementation for this resource.
+ // If this is nil, then this resource does not support importing. If
+ // this is non-nil, then it supports importing and ResourceImporter
+ // must be validated. The validity of ResourceImporter is verified
+ // by InternalValidate on Resource.
+ Importer *ResourceImporter
+
+ // If non-empty, this string is emitted as a warning during Validate.
+ // This is a private interface for now, for use by DataSourceResourceShim,
+ // and not for general use. (But maybe later...)
+ deprecationMessage string
+
+ // Timeouts allow users to specify specific time durations in which an
+ // operation should time out, to allow them to extend an action to suit their
+ // usage. For example, a user may specify a large Creation timeout for their
+ // AWS RDS Instance due to its size, or when restoring from a snapshot.
+ // Resource implementors must enable Timeout support by adding the allowed
+ // actions (Create, Read, Update, Delete, Default) to the Resource struct, and
+ // accessing them in the matching methods.
+ Timeouts *ResourceTimeout
+}
+
+// See Resource documentation.
+type CreateFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type ReadFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type UpdateFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type DeleteFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type ExistsFunc func(*ResourceData, interface{}) (bool, error)
+
+// See Resource documentation.
+type StateMigrateFunc func(
+ int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error)
+
+// Apply creates, updates, and/or deletes a resource.
+func (r *Resource) Apply(
+ s *terraform.InstanceState,
+ d *terraform.InstanceDiff,
+ meta interface{}) (*terraform.InstanceState, error) {
+ data, err := schemaMap(r.Schema).Data(s, d)
+ if err != nil {
+ return s, err
+ }
+
+ // The InstanceDiff should have the timeout info; copy it over to the
+ // ResourceData meta.
+ rt := ResourceTimeout{}
+ if _, ok := d.Meta[TimeoutKey]; ok {
+ if err := rt.DiffDecode(d); err != nil {
+ log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
+ }
+ } else {
+ log.Printf("[DEBUG] No meta timeoutkey found in Apply()")
+ }
+ data.timeouts = &rt
+
+ if s == nil {
+ // The Terraform API dictates that this should never happen, but
+ // it doesn't hurt to be safe in this case.
+ s = new(terraform.InstanceState)
+ }
+
+ if d.Destroy || d.RequiresNew() {
+ if s.ID != "" {
+ // Destroy the resource since it is created
+ if err := r.Delete(data, meta); err != nil {
+ return r.recordCurrentSchemaVersion(data.State()), err
+ }
+
+ // Make sure the ID is gone.
+ data.SetId("")
+ }
+
+ // If we're only destroying, and not creating, then return
+ // now since we're done!
+ if !d.RequiresNew() {
+ return nil, nil
+ }
+
+ // Reset the data to be stateless since we just destroyed
+ data, err = schemaMap(r.Schema).Data(nil, d)
+ if err != nil {
+ return nil, err
+ }
+ // data was reset, need to re-apply the parsed timeouts
+ data.timeouts = &rt
+ }
+
+ err = nil
+ if data.Id() == "" {
+ // We're creating, it is a new resource.
+ data.MarkNewResource()
+ err = r.Create(data, meta)
+ } else {
+ if r.Update == nil {
+ return s, fmt.Errorf("doesn't support update")
+ }
+
+ err = r.Update(data, meta)
+ }
+
+ return r.recordCurrentSchemaVersion(data.State()), err
+}
+
+// Diff returns a diff of this resource and is API compatible with the
+// ResourceProvider interface.
+func (r *Resource) Diff(
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+
+ t := &ResourceTimeout{}
+ err := t.ConfigDecode(r, c)
+
+ if err != nil {
+ return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err)
+ }
+
+ instanceDiff, err := schemaMap(r.Schema).Diff(s, c)
+ if err != nil {
+ return instanceDiff, err
+ }
+
+ if instanceDiff != nil {
+ if err := t.DiffEncode(instanceDiff); err != nil {
+ log.Printf("[ERR] Error encoding timeout to instance diff: %s", err)
+ }
+ } else {
+ log.Printf("[DEBUG] Instance Diff is nil in Diff()")
+ }
+
+ return instanceDiff, err
+}
+
+// Validate validates the resource configuration against the schema.
+func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ warns, errs := schemaMap(r.Schema).Validate(c)
+
+ if r.deprecationMessage != "" {
+ warns = append(warns, r.deprecationMessage)
+ }
+
+ return warns, errs
+}
+
+// ReadDataApply loads the data for a data source, given a diff that
+// describes the configuration arguments and desired computed attributes.
+func (r *Resource) ReadDataApply(
+ d *terraform.InstanceDiff,
+ meta interface{},
+) (*terraform.InstanceState, error) {
+
+ // Data sources are always built completely from scratch
+ // on each read, so the source state is always nil.
+ data, err := schemaMap(r.Schema).Data(nil, d)
+ if err != nil {
+ return nil, err
+ }
+
+ err = r.Read(data, meta)
+ state := data.State()
+ if state != nil && state.ID == "" {
+ // Data sources can set an ID if they want, but they aren't
+ // required to; we'll provide a placeholder if they don't,
+ // to preserve the invariant that all resources have non-empty
+ // ids.
+ state.ID = "-"
+ }
+
+ return r.recordCurrentSchemaVersion(state), err
+}
+
+// Refresh refreshes the state of the resource.
+func (r *Resource) Refresh(
+ s *terraform.InstanceState,
+ meta interface{}) (*terraform.InstanceState, error) {
+ // If the ID is already somehow blank, it doesn't exist
+ if s.ID == "" {
+ return nil, nil
+ }
+
+ rt := ResourceTimeout{}
+ if _, ok := s.Meta[TimeoutKey]; ok {
+ if err := rt.StateDecode(s); err != nil {
+ log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
+ }
+ }
+
+ if r.Exists != nil {
+ // Make a copy of data so that if it is modified it doesn't
+ // affect our Read later.
+ data, err := schemaMap(r.Schema).Data(s, nil)
+ if err != nil {
+ return s, err
+ }
+ data.timeouts = &rt
+
+ exists, err := r.Exists(data, meta)
+ if err != nil {
+ return s, err
+ }
+ if !exists {
+ return nil, nil
+ }
+ }
+
+ needsMigration, stateSchemaVersion := r.checkSchemaVersion(s)
+ if needsMigration && r.MigrateState != nil {
+ s, err := r.MigrateState(stateSchemaVersion, s, meta)
+ if err != nil {
+ return s, err
+ }
+ }
+
+ data, err := schemaMap(r.Schema).Data(s, nil)
+ if err != nil {
+ return s, err
+ }
+ data.timeouts = &rt
+
+ err = r.Read(data, meta)
+ state := data.State()
+ if state != nil && state.ID == "" {
+ state = nil
+ }
+
+ return r.recordCurrentSchemaVersion(state), err
+}
+
+// InternalValidate should be called to validate the structure
+// of the resource.
+//
+// This should be called in a unit test for any resource to verify
+// before release that a resource is properly configured for use with
+// this library.
+//
+// Provider.InternalValidate() will automatically call this for all of
+// the resources it manages, so you don't need to call this manually if it
+// is part of a Provider.
+func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error {
+ if r == nil {
+ return errors.New("resource is nil")
+ }
+
+ if !writable {
+ if r.Create != nil || r.Update != nil || r.Delete != nil {
+ return fmt.Errorf("must not implement Create, Update or Delete")
+ }
+ }
+
+ tsm := topSchemaMap
+
+ if r.isTopLevel() && writable {
+ // All non-Computed attributes must be ForceNew if Update is not defined
+ if r.Update == nil {
+ nonForceNewAttrs := make([]string, 0)
+ for k, v := range r.Schema {
+ if !v.ForceNew && !v.Computed {
+ nonForceNewAttrs = append(nonForceNewAttrs, k)
+ }
+ }
+ if len(nonForceNewAttrs) > 0 {
+ return fmt.Errorf(
+ "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs)
+ }
+ } else {
+ nonUpdateableAttrs := make([]string, 0)
+ for k, v := range r.Schema {
+ if v.ForceNew || (v.Computed && !v.Optional) {
+ nonUpdateableAttrs = append(nonUpdateableAttrs, k)
+ }
+ }
+ updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs)
+ if updateableAttrs == 0 {
+ return fmt.Errorf(
+ "All fields are ForceNew or Computed w/out Optional, Update is superfluous")
+ }
+ }
+
+ tsm = schemaMap(r.Schema)
+
+ // Read and Delete are required
+ if r.Read == nil {
+ return fmt.Errorf("Read must be implemented")
+ }
+ if r.Delete == nil {
+ return fmt.Errorf("Delete must be implemented")
+ }
+
+ // If we have an importer, we need to verify the importer.
+ if r.Importer != nil {
+ if err := r.Importer.InternalValidate(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return schemaMap(r.Schema).InternalValidate(tsm)
+}
+
+// Data returns a ResourceData struct for this Resource. Each return value
+// is a separate copy and can be safely modified differently.
+//
+// The data returned from this function has no actual effect on the Resource
+// itself (including the state given to this function).
+//
+// This function is useful for unit tests and ResourceImporter functions.
+func (r *Resource) Data(s *terraform.InstanceState) *ResourceData {
+ result, err := schemaMap(r.Schema).Data(s, nil)
+ if err != nil {
+ // At the time of writing, this isn't possible (Data never returns
+ // non-nil errors). We panic so that the problem surfaces quickly if
+ // that ever changes.
+ panic(err)
+ }
+
+ // Set the schema version to latest by default
+ result.meta = map[string]interface{}{
+ "schema_version": strconv.Itoa(r.SchemaVersion),
+ }
+
+ return result
+}
+
+// TestResourceData yields a ResourceData filled with this resource's schema, for use in unit testing.
+//
+// TODO: May be able to be removed with the above ResourceData function.
+func (r *Resource) TestResourceData() *ResourceData {
+ return &ResourceData{
+ schema: r.Schema,
+ }
+}
+
+// Returns true if the resource is "top level" i.e. not a sub-resource.
+func (r *Resource) isTopLevel() bool {
+ // TODO: This is a heuristic; replace with a definitive attribute?
+ return r.Create != nil
+}
+
+// Determines if a given InstanceState needs to be migrated by comparing the
+// stored version number with the current SchemaVersion.
+func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) {
+ // Get the raw interface{} value for the schema version. If it doesn't
+ // exist or is nil then set it to zero.
+ raw := is.Meta["schema_version"]
+ if raw == nil {
+ raw = "0"
+ }
+
+ // Try to convert it to a string. If it isn't a string then we pretend
+ // that it isn't set at all. It should never not be a string unless it
+ // was manually tampered with.
+ rawString, ok := raw.(string)
+ if !ok {
+ rawString = "0"
+ }
+
+ stateSchemaVersion, _ := strconv.Atoi(rawString)
+ return stateSchemaVersion < r.SchemaVersion, stateSchemaVersion
+}
+
+func (r *Resource) recordCurrentSchemaVersion(
+ state *terraform.InstanceState) *terraform.InstanceState {
+ if state != nil && r.SchemaVersion > 0 {
+ if state.Meta == nil {
+ state.Meta = make(map[string]interface{})
+ }
+ state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion)
+ }
+ return state
+}
+
+// Noop is a convenience implementation of resource function which takes
+// no action and returns no error.
+func Noop(*ResourceData, interface{}) error {
+ return nil
+}
+
+// RemoveFromState is a convenience implementation of a resource function
+// which sets the resource ID to empty string (to remove it from state)
+// and returns no error.
+func RemoveFromState(d *ResourceData, _ interface{}) error {
+ d.SetId("")
+ return nil
+}
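+
+// Illustrative sketch, not part of the upstream source: a Resource that bumps
+// its SchemaVersion and repairs old state in MigrateState. The attribute
+// names are hypothetical placeholders.
+func exampleVersionedResource() *Resource {
+ return &Resource{
+ SchemaVersion: 1,
+ MigrateState: func(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
+ if v == 0 {
+ // hypothetical attribute rename between schema versions
+ is.Attributes["name"] = is.Attributes["display_name"]
+ delete(is.Attributes, "display_name")
+ }
+ return is, nil
+ },
+ Create: Noop,
+ Read: Noop,
+ Delete: RemoveFromState,
+ }
+}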
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
new file mode 100644
index 00000000..b2bc8f6c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
@@ -0,0 +1,502 @@
+package schema
+
+import (
+ "log"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// ResourceData is used to query and set the attributes of a resource.
+//
+// ResourceData is the primary argument received for CRUD operations on
+// a resource as well as configuration of a provider. It is a powerful
+// structure that can be used to not only query data, but check for changes,
+// define partial state updates, etc.
+//
+// The most relevant methods to take a look at are Get, Set, and Partial.
+type ResourceData struct {
+ // Settable (internally)
+ schema map[string]*Schema
+ config *terraform.ResourceConfig
+ state *terraform.InstanceState
+ diff *terraform.InstanceDiff
+ meta map[string]interface{}
+ timeouts *ResourceTimeout
+
+ // Don't set
+ multiReader *MultiLevelFieldReader
+ setWriter *MapFieldWriter
+ newState *terraform.InstanceState
+ partial bool
+ partialMap map[string]struct{}
+ once sync.Once
+ isNew bool
+}
+
+// getResult is the internal structure that is generated when a Get
+// is called that contains some extra data that might be used.
+type getResult struct {
+ Value interface{}
+ ValueProcessed interface{}
+ Computed bool
+ Exists bool
+ Schema *Schema
+}
+
+// UnsafeSetFieldRaw allows setting arbitrary fields in the state to arbitrary
+// string values, bypassing the schema. This MUST NOT be used in normal
+// circumstances - it exists only to support the remote_state data source.
+func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) {
+ d.once.Do(d.init)
+
+ d.setWriter.unsafeWriteField(key, value)
+}
+
+// Get returns the data for the given key, or nil if the key doesn't exist
+// in the schema.
+//
+// If the key does exist in the schema but doesn't exist in the configuration,
+// then the default value for that type will be returned. For strings, this is
+// "", for numbers it is 0, etc.
+//
+// If you want to test if something is set at all in the configuration,
+// use GetOk.
+func (d *ResourceData) Get(key string) interface{} {
+ v, _ := d.GetOk(key)
+ return v
+}
+
+// GetChange returns the old and new value for a given key.
+//
+// HasChange should be used to check if a change exists. It is possible
+// that both the old and new value are the same if the old value was not
+// set and the new value is. This is common, for example, for boolean
+// fields which have a zero value of false.
+func (d *ResourceData) GetChange(key string) (interface{}, interface{}) {
+ o, n := d.getChange(key, getSourceState, getSourceDiff)
+ return o.Value, n.Value
+}
+
+// GetOk returns the data for the given key and whether or not the key
+// has been set to a non-zero value at some point.
+//
+// The first result will not necessarily be nil if the value doesn't exist.
+// The second result should be checked to determine this information.
+func (d *ResourceData) GetOk(key string) (interface{}, bool) {
+ r := d.getRaw(key, getSourceSet)
+ exists := r.Exists && !r.Computed
+ if exists {
+ // If it exists, we also want to verify it is not the zero-value.
+ value := r.Value
+ zero := r.Schema.Type.Zero()
+
+ if eq, ok := value.(Equal); ok {
+ exists = !eq.Equal(zero)
+ } else {
+ exists = !reflect.DeepEqual(value, zero)
+ }
+ }
+
+ return r.Value, exists
+}
+
+func (d *ResourceData) getRaw(key string, level getSource) getResult {
+ var parts []string
+ if key != "" {
+ parts = strings.Split(key, ".")
+ }
+
+ return d.get(parts, level)
+}
+
+// HasChange returns whether or not the given key has been changed.
+func (d *ResourceData) HasChange(key string) bool {
+ o, n := d.GetChange(key)
+
+ // If the type implements the Equal interface, then call that
+ // instead of just doing a reflect.DeepEqual. An example where this is
+ // needed is *Set
+ if eq, ok := o.(Equal); ok {
+ return !eq.Equal(n)
+ }
+
+ return !reflect.DeepEqual(o, n)
+}
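+
+// Illustrative sketch, not part of the upstream source: distinguishing
+// "unset" from "zero" with GetOk and reacting to changes with HasChange.
+// The keys are hypothetical placeholders.
+func exampleChangeHandling(d *ResourceData) {
+ if v, ok := d.GetOk("description"); ok {
+ _ = v.(string) // only set, non-zero values reach this branch
+ }
+ if d.HasChange("size") {
+ oldV, newV := d.GetChange("size")
+ _, _ = oldV, newV
+ }
+}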
+
+// Partial turns partial state mode on/off.
+//
+// When partial state mode is enabled, then only key prefixes specified
+// by SetPartial will be in the final state. This allows providers to return
+// partial states for partially applied resources (when errors occur).
+func (d *ResourceData) Partial(on bool) {
+ d.partial = on
+ if on {
+ if d.partialMap == nil {
+ d.partialMap = make(map[string]struct{})
+ }
+ } else {
+ d.partialMap = nil
+ }
+}
+
+// Set sets the value for the given key.
+//
+// If the key is invalid or the value is not a correct type, an error
+// will be returned.
+func (d *ResourceData) Set(key string, value interface{}) error {
+ d.once.Do(d.init)
+
+ // If the value is a pointer to a non-struct, get its value and
+ // use that. This allows Set to take a pointer to primitives to
+ // simplify the interface.
+ reflectVal := reflect.ValueOf(value)
+ if reflectVal.Kind() == reflect.Ptr {
+ if reflectVal.IsNil() {
+ // If the pointer is nil, then the value is just nil
+ value = nil
+ } else {
+ // Otherwise, we dereference the pointer as long as it's not
+ // a pointer to a struct, since struct pointers are allowed.
+ reflectVal = reflect.Indirect(reflectVal)
+ if reflectVal.Kind() != reflect.Struct {
+ value = reflectVal.Interface()
+ }
+ }
+ }
+
+ return d.setWriter.WriteField(strings.Split(key, "."), value)
+}
+
+// SetPartial adds the key to the final state output while
+// in partial state mode. The key must be a root key in the schema (i.e.
+// it cannot be "list.0").
+//
+// If partial state mode is disabled, then this has no effect. Additionally,
+// whenever partial state mode is toggled, the partial data is cleared.
+func (d *ResourceData) SetPartial(k string) {
+ if d.partial {
+ d.partialMap[k] = struct{}{}
+ }
+}
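+
+// Illustrative sketch, not part of the upstream source: partial state mode
+// around two hypothetical remote calls, so that if the second call fails only
+// the "rule" key is persisted to state.
+func examplePartialUpdate(d *ResourceData) error {
+ d.Partial(true)
+ // ... first remote call succeeds ...
+ d.SetPartial("rule")
+ // ... a failed second remote call would return here, while still in
+ // partial mode, so that only "rule" is saved ...
+ d.Partial(false)
+ return nil
+}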
+
+func (d *ResourceData) MarkNewResource() {
+ d.isNew = true
+}
+
+func (d *ResourceData) IsNewResource() bool {
+ return d.isNew
+}
+
+// Id returns the ID of the resource.
+func (d *ResourceData) Id() string {
+ var result string
+
+ if d.state != nil {
+ result = d.state.ID
+ }
+
+ if d.newState != nil {
+ result = d.newState.ID
+ }
+
+ return result
+}
+
+// ConnInfo returns the connection info for this resource.
+func (d *ResourceData) ConnInfo() map[string]string {
+ if d.newState != nil {
+ return d.newState.Ephemeral.ConnInfo
+ }
+
+ if d.state != nil {
+ return d.state.Ephemeral.ConnInfo
+ }
+
+ return nil
+}
+
+// SetId sets the ID of the resource. If the value is blank, then the
+// resource is destroyed.
+func (d *ResourceData) SetId(v string) {
+ d.once.Do(d.init)
+ d.newState.ID = v
+}
+
+// SetConnInfo sets the connection info for a resource.
+func (d *ResourceData) SetConnInfo(v map[string]string) {
+ d.once.Do(d.init)
+ d.newState.Ephemeral.ConnInfo = v
+}
+
+// SetType sets the ephemeral type for the data. This is only required
+// for importing.
+func (d *ResourceData) SetType(t string) {
+ d.once.Do(d.init)
+ d.newState.Ephemeral.Type = t
+}
+
+// State returns the new InstanceState after the diff and any Set
+// calls.
+func (d *ResourceData) State() *terraform.InstanceState {
+ var result terraform.InstanceState
+ result.ID = d.Id()
+ result.Meta = d.meta
+
+ // If we have no ID, then this resource doesn't exist and we just
+ // return nil.
+ if result.ID == "" {
+ return nil
+ }
+
+ if d.timeouts != nil {
+ if err := d.timeouts.StateEncode(&result); err != nil {
+ log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err)
+ }
+ }
+
+ // Look for a magic key in the schema that determines we skip the
+ // integrity check of fields existing in the schema, allowing dynamic
+ // keys to be created.
+ hasDynamicAttributes := false
+ for k := range d.schema {
+ if k == "__has_dynamic_attributes" {
+ hasDynamicAttributes = true
+ log.Printf("[INFO] Resource %s has dynamic attributes", result.ID)
+ }
+ }
+
+ // In order to build the final state attributes, we read the full
+ // attribute set as a map[string]interface{}, write it to a MapFieldWriter,
+ // and then use that map.
+ rawMap := make(map[string]interface{})
+ for k := range d.schema {
+ source := getSourceSet
+ if d.partial {
+ source = getSourceState
+ if _, ok := d.partialMap[k]; ok {
+ source = getSourceSet
+ }
+ }
+
+ raw := d.get([]string{k}, source)
+ if raw.Exists && !raw.Computed {
+ rawMap[k] = raw.Value
+ if raw.ValueProcessed != nil {
+ rawMap[k] = raw.ValueProcessed
+ }
+ }
+ }
+
+ mapW := &MapFieldWriter{Schema: d.schema}
+ if err := mapW.WriteField(nil, rawMap); err != nil {
+ return nil
+ }
+
+ result.Attributes = mapW.Map()
+
+ if hasDynamicAttributes {
+ // If we have dynamic attributes, just copy the attributes map
+ // one for one into the result attributes.
+ for k, v := range d.setWriter.Map() {
+ // Don't clobber schema values. This limits usage of dynamic
+ // attributes to names which _do not_ conflict with schema
+ // keys!
+ if _, ok := result.Attributes[k]; !ok {
+ result.Attributes[k] = v
+ }
+ }
+ }
+
+ if d.newState != nil {
+ result.Ephemeral = d.newState.Ephemeral
+ }
+
+ // TODO: This is hacky and we can remove this when we have a proper
+ // state writer. We should instead have a proper StateFieldWriter
+ // and use that.
+ for k, schema := range d.schema {
+ if schema.Type != TypeMap {
+ continue
+ }
+
+ if result.Attributes[k] == "" {
+ delete(result.Attributes, k)
+ }
+ }
+
+ if v := d.Id(); v != "" {
+ result.Attributes["id"] = d.Id()
+ }
+
+ if d.state != nil {
+ result.Tainted = d.state.Tainted
+ }
+
+ return &result
+}
+
+// Timeout returns the duration for the given timeout key. It falls back to
+// the Default timeout, and then to 20 minutes, when the key is not set.
+func (d *ResourceData) Timeout(key string) time.Duration {
+ key = strings.ToLower(key)
+
+ var timeout *time.Duration
+ switch key {
+ case TimeoutCreate:
+ timeout = d.timeouts.Create
+ case TimeoutRead:
+ timeout = d.timeouts.Read
+ case TimeoutUpdate:
+ timeout = d.timeouts.Update
+ case TimeoutDelete:
+ timeout = d.timeouts.Delete
+ }
+
+ if timeout != nil {
+ return *timeout
+ }
+
+ if d.timeouts.Default != nil {
+ return *d.timeouts.Default
+ }
+
+ // Return system default of 20 minutes
+ return 20 * time.Minute
+}
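+
+// Illustrative sketch, not part of the upstream source: honoring the
+// configured create timeout from within a CRUD function.
+func exampleTimeoutUse(d *ResourceData) <-chan time.Time {
+ return time.After(d.Timeout(TimeoutCreate))
+}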
+
+func (d *ResourceData) init() {
+ // Initialize the field that will store our new state
+ var copyState terraform.InstanceState
+ if d.state != nil {
+ copyState = *d.state.DeepCopy()
+ }
+ d.newState = &copyState
+
+ // Initialize the map for storing set data
+ d.setWriter = &MapFieldWriter{Schema: d.schema}
+
+ // Initialize the reader for getting data from the
+ // underlying sources (config, diff, etc.)
+ readers := make(map[string]FieldReader)
+ var stateAttributes map[string]string
+ if d.state != nil {
+ stateAttributes = d.state.Attributes
+ readers["state"] = &MapFieldReader{
+ Schema: d.schema,
+ Map: BasicMapReader(stateAttributes),
+ }
+ }
+ if d.config != nil {
+ readers["config"] = &ConfigFieldReader{
+ Schema: d.schema,
+ Config: d.config,
+ }
+ }
+ if d.diff != nil {
+ readers["diff"] = &DiffFieldReader{
+ Schema: d.schema,
+ Diff: d.diff,
+ Source: &MultiLevelFieldReader{
+ Levels: []string{"state", "config"},
+ Readers: readers,
+ },
+ }
+ }
+ readers["set"] = &MapFieldReader{
+ Schema: d.schema,
+ Map: BasicMapReader(d.setWriter.Map()),
+ }
+ d.multiReader = &MultiLevelFieldReader{
+ Levels: []string{
+ "state",
+ "config",
+ "diff",
+ "set",
+ },
+
+ Readers: readers,
+ }
+}
+
+func (d *ResourceData) diffChange(
+ k string) (interface{}, interface{}, bool, bool) {
+ // Get the change between the state and the config.
+ o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact)
+ if !o.Exists {
+ o.Value = nil
+ }
+ if !n.Exists {
+ n.Value = nil
+ }
+
+ // Return the old, new, and whether there is a change
+ return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed
+}
+
+func (d *ResourceData) getChange(
+ k string,
+ oldLevel getSource,
+ newLevel getSource) (getResult, getResult) {
+ var parts, parts2 []string
+ if k != "" {
+ parts = strings.Split(k, ".")
+ parts2 = strings.Split(k, ".")
+ }
+
+ o := d.get(parts, oldLevel)
+ n := d.get(parts2, newLevel)
+ return o, n
+}
+
+func (d *ResourceData) get(addr []string, source getSource) getResult {
+ d.once.Do(d.init)
+
+ level := "set"
+ flags := source & ^getSourceLevelMask
+ exact := flags&getSourceExact != 0
+ source = source & getSourceLevelMask
+ if source >= getSourceSet {
+ level = "set"
+ } else if source >= getSourceDiff {
+ level = "diff"
+ } else if source >= getSourceConfig {
+ level = "config"
+ } else {
+ level = "state"
+ }
+
+ var result FieldReadResult
+ var err error
+ if exact {
+ result, err = d.multiReader.ReadFieldExact(addr, level)
+ } else {
+ result, err = d.multiReader.ReadFieldMerge(addr, level)
+ }
+ if err != nil {
+ panic(err)
+ }
+
+ // If the result doesn't exist, then we set the value to the zero value
+ var schema *Schema
+ if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 {
+ schema = schemaL[len(schemaL)-1]
+ }
+
+ if result.Value == nil && schema != nil {
+ result.Value = result.ValueOrZero(schema)
+ }
+
+ // Transform the FieldReadResult into a getResult. It might be worth
+ // merging these two structures one day.
+ return getResult{
+ Value: result.Value,
+ ValueProcessed: result.ValueProcessed,
+ Computed: result.Computed,
+ Exists: result.Exists,
+ Schema: schema,
+ }
+}
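+
+// Illustrative sketch, not part of the upstream source: typical Get, Set, and
+// SetId usage inside a resource's Create function. The "address" keys are
+// hypothetical placeholders.
+func exampleCreate(d *ResourceData, meta interface{}) error {
+ addr := d.Get("address").(string)
+ if err := d.Set("address_normalized", addr); err != nil {
+ return err
+ }
+ d.SetId(addr)
+ return nil
+}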
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go
new file mode 100644
index 00000000..7dd655de
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go
@@ -0,0 +1,17 @@
+package schema
+
+//go:generate stringer -type=getSource resource_data_get_source.go
+
+// getSource represents the level we want to get for a value (internally).
+// Any source less than or equal to the level will be loaded (whichever
+// has a value first).
+type getSource byte
+
+const (
+ getSourceState getSource = 1 << iota
+ getSourceConfig
+ getSourceDiff
+ getSourceSet
+ getSourceExact // Only get from the _exact_ level
+ getSourceLevelMask getSource = getSourceState | getSourceConfig | getSourceDiff | getSourceSet
+)
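+
+// Illustrative sketch, not part of the upstream source: how the level bits
+// and the exact flag combine, mirroring the decoding in ResourceData.get.
+func exampleGetSource() (getSource, bool) {
+ src := getSourceConfig | getSourceExact
+ level := src & getSourceLevelMask // getSourceConfig
+ exact := src&getSourceExact != 0 // true
+ return level, exact
+}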
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go
new file mode 100644
index 00000000..5dada3ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go
@@ -0,0 +1,52 @@
+package schema
+
+// ResourceImporter defines how a resource is imported in Terraform. This
+// can be set onto a Resource struct to make it Importable. Not all resources
+// have to be importable; if a Resource doesn't have a ResourceImporter then
+// it won't be importable.
+//
+// "Importing" in Terraform is the process of taking an already-created
+// resource and bringing it under Terraform management. This can include
+// updating Terraform state, generating Terraform configuration, etc.
+type ResourceImporter struct {
+ // The functions below must all be implemented for importing to work.
+
+ // State is called to convert an ID to one or more InstanceState to
+ // insert into the Terraform state. If this isn't specified, then
+ // the ID is passed straight through.
+ State StateFunc
+}
+
+// StateFunc is the function called to import a resource into the
+// Terraform state. It is given a ResourceData with only ID set. This
+// ID is going to be an arbitrary value given by the user and may not map
+// directly to the ID format that the resource expects, so that should
+// be validated.
+//
+// This should return a slice of ResourceData that turn into the state
+// that was imported. This might be as simple as returning only the argument
+// that was given to the function. In other cases (such as AWS security groups),
+// an import may fan out to multiple resources and this will have to return
+// multiple.
+//
+// To create the ResourceData structures for other resource types (if
+// you have to), instantiate your resource and call the Data function.
+type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error)
+
+// InternalValidate should be called to validate the structure of this
+// importer. This should be called in a unit test.
+//
+// Resource.InternalValidate() will automatically call this, so this doesn't
+// need to be called manually. Further, Resource.InternalValidate() is
+// automatically called by Provider.InternalValidate(), so you only need
+// to internal validate the provider.
+func (r *ResourceImporter) InternalValidate() error {
+ return nil
+}
+
+// ImportStatePassthrough is an implementation of StateFunc that can be
+// used to simply pass the ID directly through. This should be used only
+// in the case that an ID-only refresh is possible.
+func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) {
+ return []*ResourceData{d}, nil
+}
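+
+// Illustrative sketch, not part of the upstream source: attaching the
+// passthrough importer to a resource so an existing object can be brought
+// under management with an ID-only refresh.
+func exampleImportableResource() *Resource {
+ return &Resource{
+ Importer: &ResourceImporter{
+ State: ImportStatePassthrough,
+ },
+ }
+}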
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
new file mode 100644
index 00000000..445819f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
@@ -0,0 +1,237 @@
+package schema
+
+import (
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/mitchellh/copystructure"
+)
+
+const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0"
+const TimeoutsConfigKey = "timeouts"
+
+const (
+ TimeoutCreate = "create"
+ TimeoutRead = "read"
+ TimeoutUpdate = "update"
+ TimeoutDelete = "delete"
+ TimeoutDefault = "default"
+)
+
+func timeoutKeys() []string {
+ return []string{
+ TimeoutCreate,
+ TimeoutRead,
+ TimeoutUpdate,
+ TimeoutDelete,
+ TimeoutDefault,
+ }
+}
+
+// DefaultTimeout converts the given value, which may be a time.Duration,
+// an int64, or a float64, into a *time.Duration.
+func DefaultTimeout(tx interface{}) *time.Duration {
+ var td time.Duration
+ switch raw := tx.(type) {
+ case time.Duration:
+ return &raw
+ case int64:
+ td = time.Duration(raw)
+ case float64:
+ td = time.Duration(int64(raw))
+ default:
+ log.Printf("[WARN] Unknown type in DefaultTimeout: %#v", tx)
+ }
+ return &td
+}
+
+type ResourceTimeout struct {
+ Create, Read, Update, Delete, Default *time.Duration
+}
+
+// ConfigDecode takes a resource and its configuration (available during Diff)
+// and validates and parses the timeouts into `t`.
+func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) error {
+ if s.Timeouts != nil {
+ raw, err := copystructure.Copy(s.Timeouts)
+ if err != nil {
+ log.Printf("[DEBUG] Error with deep copy: %s", err)
+ }
+ *t = *raw.(*ResourceTimeout)
+ }
+
+ if raw, ok := c.Config[TimeoutsConfigKey]; ok {
+ if configTimeouts, ok := raw.([]map[string]interface{}); ok {
+ for _, timeoutValues := range configTimeouts {
+ // loop through each Timeout given in the configuration and validate it
+ // against the Timeouts defined in the resource
+ for timeKey, timeValue := range timeoutValues {
+ // validate that we're dealing with the normal CRUD actions
+ var found bool
+ for _, key := range timeoutKeys() {
+ if timeKey == key {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey)
+ }
+
+ // Get timeout
+ rt, err := time.ParseDuration(timeValue.(string))
+ if err != nil {
+ return fmt.Errorf("Error parsing Timeout for (%s): %s", timeKey, err)
+ }
+
+ var timeout *time.Duration
+ switch timeKey {
+ case TimeoutCreate:
+ timeout = t.Create
+ case TimeoutUpdate:
+ timeout = t.Update
+ case TimeoutRead:
+ timeout = t.Read
+ case TimeoutDelete:
+ timeout = t.Delete
+ case TimeoutDefault:
+ timeout = t.Default
+ }
+
+ // If the resource has not declared this in the definition, then error
+ // with an unsupported message
+ if timeout == nil {
+ return unsupportedTimeoutKeyError(timeKey)
+ }
+
+ *timeout = rt
+ }
+ }
+ } else {
+ log.Printf("[WARN] Invalid Timeout structure found, skipping timeouts")
+ }
+ }
+
+ return nil
+}
+
+func unsupportedTimeoutKeyError(key string) error {
+ return fmt.Errorf("Timeout Key (%s) is not supported", key)
+}
+
+// DiffEncode, StateEncode, and their decoding counterparts are analogous to
+// the Go standard library's json.Encoder and json.Decoder: they encode and
+// decode a timeouts struct to and from an instance diff, which is where the
+// timeout data is stored after a diff so it can be passed into Apply.
+//
+// StateEncode encodes the timeout into the ResourceData's InstanceState for
+// saving to state.
+//
+func (t *ResourceTimeout) DiffEncode(id *terraform.InstanceDiff) error {
+ return t.metaEncode(id)
+}
+
+func (t *ResourceTimeout) StateEncode(is *terraform.InstanceState) error {
+ return t.metaEncode(is)
+}
+
+// metaEncode encodes the ResourceTimeout into a map[string]interface{} format
+// and stores it in the Meta field of the interface it's given.
+// Assumes the interface is either *terraform.InstanceState or
+// *terraform.InstanceDiff, returns an error otherwise
+func (t *ResourceTimeout) metaEncode(ids interface{}) error {
+ m := make(map[string]interface{})
+
+ if t.Create != nil {
+ m[TimeoutCreate] = t.Create.Nanoseconds()
+ }
+ if t.Read != nil {
+ m[TimeoutRead] = t.Read.Nanoseconds()
+ }
+ if t.Update != nil {
+ m[TimeoutUpdate] = t.Update.Nanoseconds()
+ }
+ if t.Delete != nil {
+ m[TimeoutDelete] = t.Delete.Nanoseconds()
+ }
+ if t.Default != nil {
+ m[TimeoutDefault] = t.Default.Nanoseconds()
+ // for any key above that is nil, if default is specified, we need to
+ // populate it with the default
+ for _, k := range timeoutKeys() {
+ if _, ok := m[k]; !ok {
+ m[k] = t.Default.Nanoseconds()
+ }
+ }
+ }
+
+ // only add the Timeout to the Meta if we have values
+ if len(m) > 0 {
+ switch instance := ids.(type) {
+ case *terraform.InstanceDiff:
+ if instance.Meta == nil {
+ instance.Meta = make(map[string]interface{})
+ }
+ instance.Meta[TimeoutKey] = m
+ case *terraform.InstanceState:
+ if instance.Meta == nil {
+ instance.Meta = make(map[string]interface{})
+ }
+ instance.Meta[TimeoutKey] = m
+ default:
+ return fmt.Errorf("Error matching type for Diff Encode")
+ }
+ }
+
+ return nil
+}
+
+func (t *ResourceTimeout) StateDecode(id *terraform.InstanceState) error {
+ return t.metaDecode(id)
+}
+func (t *ResourceTimeout) DiffDecode(is *terraform.InstanceDiff) error {
+ return t.metaDecode(is)
+}
+
+func (t *ResourceTimeout) metaDecode(ids interface{}) error {
+ var rawMeta interface{}
+ var ok bool
+ switch rawInstance := ids.(type) {
+ case *terraform.InstanceDiff:
+ rawMeta, ok = rawInstance.Meta[TimeoutKey]
+ if !ok {
+ return nil
+ }
+ case *terraform.InstanceState:
+ rawMeta, ok = rawInstance.Meta[TimeoutKey]
+ if !ok {
+ return nil
+ }
+ default:
+ return fmt.Errorf("Unknown or unsupported type in metaDecode: %#v", ids)
+ }
+
+ times := rawMeta.(map[string]interface{})
+ if len(times) == 0 {
+ return nil
+ }
+
+ if v, ok := times[TimeoutCreate]; ok {
+ t.Create = DefaultTimeout(v)
+ }
+ if v, ok := times[TimeoutRead]; ok {
+ t.Read = DefaultTimeout(v)
+ }
+ if v, ok := times[TimeoutUpdate]; ok {
+ t.Update = DefaultTimeout(v)
+ }
+ if v, ok := times[TimeoutDelete]; ok {
+ t.Delete = DefaultTimeout(v)
+ }
+ if v, ok := times[TimeoutDefault]; ok {
+ t.Default = DefaultTimeout(v)
+ }
+
+ return nil
+}
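+
+// Illustrative sketch, not part of the upstream source: round-tripping a
+// ResourceTimeout through an InstanceDiff, mirroring how Diff encodes the
+// timeouts and Apply decodes them.
+func exampleTimeoutRoundTrip() error {
+ rt := &ResourceTimeout{Create: DefaultTimeout(10 * time.Minute)}
+ id := &terraform.InstanceDiff{}
+ if err := rt.DiffEncode(id); err != nil {
+ return err
+ }
+ decoded := &ResourceTimeout{}
+ return decoded.DiffDecode(id)
+}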
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
new file mode 100644
index 00000000..32d17213
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
@@ -0,0 +1,1537 @@
+// Package schema is a high-level framework for easily writing new providers
+// for Terraform. Usage of schema is recommended over attempting to write
+// to the low-level plugin interfaces manually.
+//
+// schema breaks down provider creation into simple CRUD operations for
+// resources. The logic of diffing, destroying before creating, updating
+// or creating, etc. is all handled by the framework. The plugin author
+// only needs to implement a configuration schema and the CRUD operations and
+// everything else is meant to just work.
+//
+// A good starting point is to view the Provider structure.
+package schema
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/mitchellh/mapstructure"
+)
+
+// type used for schema package context keys
+type contextKey string
+
+// Schema is used to describe the structure of a value.
+//
+// Read the documentation of the struct elements for important details.
+type Schema struct {
+ // Type is the type of the value and must be one of the ValueType values.
+ //
+ // This type not only determines what type is expected/valid in configuring
+ // this value, but also what type is returned when ResourceData.Get is
+ // called. The types returned by Get are:
+ //
+ // TypeBool - bool
+ // TypeInt - int
+ // TypeFloat - float64
+ // TypeString - string
+ // TypeList - []interface{}
+ // TypeMap - map[string]interface{}
+ // TypeSet - *schema.Set
+ //
+ Type ValueType
+
+ // If one of these is set, then this item can come from the configuration.
+ // Both cannot be set. If Optional is set, the value is optional. If
+ // Required is set, the value is required.
+ //
+ // One of these must be set if the value is not computed. That is:
+ // value either comes from the config, is computed, or is both.
+ Optional bool
+ Required bool
+
+ // If this is non-nil, the provided function will be used during diff
+ // of this field. If this is nil, a default diff for the type of the
+ // schema will be used.
+ //
+ // This allows comparison based on something other than primitive, list
+ // or map equality - for example SSH public keys may be considered
+ // equivalent regardless of trailing whitespace.
+ DiffSuppressFunc SchemaDiffSuppressFunc
+
+ // If this is non-nil, then this will be a default value that is used
+ // when this item is not set in the configuration.
+ //
+ // DefaultFunc can be specified to compute a dynamic default.
+ // Only one of Default or DefaultFunc can be set. If DefaultFunc is
+ // used then its return value should be stable to avoid generating
+ // confusing/perpetual diffs.
+ //
+ // Changing either Default or the return value of DefaultFunc can be
+ // a breaking change, especially if the attribute in question has
+ // ForceNew set. If a default needs to change to align with changing
+ // assumptions in an upstream API then it may be necessary to also use
+ // the MigrateState function on the resource to change the state to match,
+ // or have the Read function adjust the state value to align with the
+ // new default.
+ //
+ // If Required is true above, then Default cannot be set. DefaultFunc
+ // can be set with Required. If the DefaultFunc returns nil, then there
+ // will be no default and the user will be asked to fill it in.
+ //
+ // If either of these is set, then the user won't be asked for input
+ // for this key if the default is not nil.
+ Default interface{}
+ DefaultFunc SchemaDefaultFunc
+
+ // Description is used as the description for docs or asking for user
+ // input. It should be relatively short (a few sentences max) and should
+ // be formatted to fit a CLI.
+ Description string
+
+ // InputDefault is the default value to use for when inputs are requested.
+ // This differs from Default in that if Default is set, no input is
+ // asked for. If Input is asked, this will be the default value offered.
+ InputDefault string
+
+ // The fields below relate to diffs.
+ //
+ // If Computed is true, then the result of this value is computed
+ // (unless specified by config) on creation.
+ //
+ // If ForceNew is true, then a change in this resource necessitates
+ // the creation of a new resource.
+ //
+ // StateFunc is a function called to change the value of this before
+ // storing it in the state (and likewise before comparing for diffs).
+ // The use for this is for example with large strings, you may want
+ // to simply store the hash of it.
+ Computed bool
+ ForceNew bool
+ StateFunc SchemaStateFunc
+
+ // The following fields are only set for a TypeList or TypeSet Type.
+ //
+ // Elem must be either a *Schema or a *Resource only if the Type is
+ // TypeList, and represents what the element type is. If it is *Schema,
+ // the element type is just a simple value. If it is *Resource, the
+ // element type is a complex structure, potentially with its own lifecycle.
+ //
+ // MaxItems defines the maximum number of items that can exist within a
+ // TypeSet or TypeList. A specific use case would be a TypeSet that is
+ // being used to wrap a complex structure, where more than one instance
+ // would cause instability.
+ //
+ // MinItems defines the minimum number of items that can exist within a
+ // TypeSet or TypeList. A specific use case would be a TypeSet that is
+ // being used to wrap a complex structure, where fewer than one instance
+ // would cause instability.
+ //
+ // PromoteSingle, if true, will allow single elements to be standalone
+ // and promote them to a list. For example "foo" would be promoted to
+ // ["foo"] automatically. This is primarily for legacy reasons and the
+ // ambiguity is not recommended for new usage. Promotion is only allowed
+ // for primitive element types.
+ Elem interface{}
+ MaxItems int
+ MinItems int
+ PromoteSingle bool
+
+ // The following fields are only valid for a TypeSet type.
+ //
+ // Set defines a function to determine the unique ID of an item so that
+ // a proper set can be built.
+ Set SchemaSetFunc
+
+ // ComputedWhen is a set of queries on the configuration. Whenever any
+ // of these things is changed, it will require a recompute (this requires
+ // that Computed is set to true).
+ //
+ // NOTE: This currently does not work.
+ ComputedWhen []string
+
+ // ConflictsWith is a set of schema keys that conflict with this schema.
+ // This will only check that they're set in the _config_. This will not
+ // raise an error for a malfunctioning resource that sets a conflicting
+ // key.
+ ConflictsWith []string
+
+ // When Deprecated is set, this attribute is deprecated.
+ //
+ // A deprecated field still works, but will probably stop working in the
+ // near future. This string is the message shown to the user with
+ // instructions on how to address the deprecation.
+ Deprecated string
+
+ // When Removed is set, this attribute has been removed from the schema
+ //
+ // Removed attributes can be left in the Schema to generate informative error
+ // messages for the user when they show up in resource configurations.
+ // This string is the message shown to the user with instructions on
+ // what to do about the removed attribute.
+ Removed string
+
+ // ValidateFunc allows individual fields to define arbitrary validation
+ // logic. It is yielded the provided config value as an interface{} that is
+ // guaranteed to be of the proper Schema type, and it can yield warnings or
+ // errors based on inspection of that value.
+ //
+ // ValidateFunc currently only works for primitive types.
+ ValidateFunc SchemaValidateFunc
+
+ // Sensitive ensures that the attribute's value does not get displayed in
+ // logs or regular output. It should be used for passwords or other
+ // secret fields. Future versions of Terraform may encrypt these
+ // values.
+ Sensitive bool
+}
+
+// SchemaDiffSuppressFunc is a function which can be used to determine
+// whether a detected diff on a schema element is "valid" or not, and
+// suppress it from the plan if necessary.
+//
+// Return true if the diff should be suppressed, false to retain it.
+type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool
+
+// SchemaDefaultFunc is a function called to return a default value for
+// a field.
+type SchemaDefaultFunc func() (interface{}, error)
+
+// EnvDefaultFunc is a helper function that returns the value of the
+// given environment variable, if one exists, or the default value
+// otherwise.
+func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc {
+ return func() (interface{}, error) {
+ if v := os.Getenv(k); v != "" {
+ return v, nil
+ }
+
+ return dv, nil
+ }
+}
+
+// MultiEnvDefaultFunc is a helper function that returns the value of the first
+// environment variable in the given list that returns a non-empty value. If
+// none of the environment variables return a value, the default value is
+// returned.
+func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc {
+ return func() (interface{}, error) {
+ for _, k := range ks {
+ if v := os.Getenv(k); v != "" {
+ return v, nil
+ }
+ }
+ return dv, nil
+ }
+}
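+
+// Illustrative sketch, not part of the upstream source: a provider argument
+// that falls back to an environment variable. The key and variable names are
+// hypothetical placeholders.
+func exampleEnvSchema() map[string]*Schema {
+ return map[string]*Schema{
+ "token": {
+ Type: TypeString,
+ Optional: true,
+ DefaultFunc: EnvDefaultFunc("EXAMPLE_TOKEN", nil),
+ Sensitive: true,
+ },
+ }
+}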
+
+// SchemaSetFunc is a function that must return a unique ID for the given
+// element. This unique ID is used to store the element in a hash.
+type SchemaSetFunc func(interface{}) int
+
+// SchemaStateFunc is a function used to convert some type to a string
+// to be stored in the state.
+type SchemaStateFunc func(interface{}) string
+
+// SchemaValidateFunc is a function used to validate a single field in the
+// schema.
+type SchemaValidateFunc func(interface{}, string) ([]string, []error)
+
+func (s *Schema) GoString() string {
+ return fmt.Sprintf("*%#v", *s)
+}
+
+// DefaultValue returns a default value for this schema by either reading
+// Default or evaluating DefaultFunc. If neither of these is defined, it
+// returns nil.
+func (s *Schema) DefaultValue() (interface{}, error) {
+ if s.Default != nil {
+ return s.Default, nil
+ }
+
+ if s.DefaultFunc != nil {
+ defaultValue, err := s.DefaultFunc()
+ if err != nil {
+ return nil, fmt.Errorf("error loading default: %s", err)
+ }
+ return defaultValue, nil
+ }
+
+ return nil, nil
+}
+
+// ZeroValue returns a zero value for the schema.
+func (s *Schema) ZeroValue() interface{} {
+ // If it's a set then we'll do a bit of extra work to provide the
+ // right hashing function in our empty value.
+ if s.Type == TypeSet {
+ setFunc := s.Set
+ if setFunc == nil {
+ // Default set function uses the schema to hash the whole value
+ elem := s.Elem
+ switch t := elem.(type) {
+ case *Schema:
+ setFunc = HashSchema(t)
+ case *Resource:
+ setFunc = HashResource(t)
+ default:
+ panic("invalid set element type")
+ }
+ }
+ return &Set{F: setFunc}
+ } else {
+ return s.Type.Zero()
+ }
+}
+
+func (s *Schema) finalizeDiff(
+ d *terraform.ResourceAttrDiff) *terraform.ResourceAttrDiff {
+ if d == nil {
+ return d
+ }
+
+ if s.Type == TypeBool {
+ normalizeBoolString := func(s string) string {
+ switch s {
+ case "0":
+ return "false"
+ case "1":
+ return "true"
+ }
+ return s
+ }
+ d.Old = normalizeBoolString(d.Old)
+ d.New = normalizeBoolString(d.New)
+ }
+
+ if s.Computed && !d.NewRemoved && d.New == "" {
+ // Computed attribute without a new value set
+ d.NewComputed = true
+ }
+
+ if s.ForceNew {
+ // ForceNew: mark this field as requiring a new resource under
+ // either of the following conditions:
+ //
+ // * Old != New - the value has changed, so this field is
+ // forcing a new resource.
+ //
+ // * NewComputed - the value is being computed, so a change is
+ // possible; mark it as forcing a new resource.
+ d.RequiresNew = d.Old != d.New || d.NewComputed
+ }
+
+ if d.NewRemoved {
+ return d
+ }
+
+ if s.Computed {
+ if d.Old != "" && d.New == "" {
+ // This is a computed value with an old value set already,
+ // just let it go.
+ return nil
+ }
+
+ if d.New == "" {
+ // Computed attribute without a new value set
+ d.NewComputed = true
+ }
+ }
+
+ if s.Sensitive {
+ // Set the Sensitive flag so output is hidden in the UI
+ d.Sensitive = true
+ }
+
+ return d
+}
+
+// schemaMap is a wrapper that adds nice functions on top of schemas.
+type schemaMap map[string]*Schema
+
+// Data returns a ResourceData for the given schema, state, and diff.
+//
+// The diff is optional.
+func (m schemaMap) Data(
+ s *terraform.InstanceState,
+ d *terraform.InstanceDiff) (*ResourceData, error) {
+ return &ResourceData{
+ schema: m,
+ state: s,
+ diff: d,
+ }, nil
+}
+
+// Diff returns the diff for a resource given the schema map,
+// state, and configuration.
+func (m schemaMap) Diff(
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+ result := new(terraform.InstanceDiff)
+ result.Attributes = make(map[string]*terraform.ResourceAttrDiff)
+
+ // Make sure to mark if the resource is tainted
+ if s != nil {
+ result.DestroyTainted = s.Tainted
+ }
+
+ d := &ResourceData{
+ schema: m,
+ state: s,
+ config: c,
+ }
+
+ for k, schema := range m {
+ err := m.diff(k, schema, result, d, false)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // If the diff requires a new resource, then we recompute the diff
+ // so we have the complete new resource diff, and preserve the
+ // RequiresNew fields where necessary so the user knows exactly what
+ // caused that.
+ if result.RequiresNew() {
+ // Create the new diff
+ result2 := new(terraform.InstanceDiff)
+ result2.Attributes = make(map[string]*terraform.ResourceAttrDiff)
+
+ // Preserve the DestroyTainted flag
+ result2.DestroyTainted = result.DestroyTainted
+
+ // Reset the data to not contain state. We have to call init()
+ // again in order to reset the FieldReaders.
+ d.state = nil
+ d.init()
+
+ // Perform the diff again
+ for k, schema := range m {
+ err := m.diff(k, schema, result2, d, false)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Reset RequiresNew on every field; below we copy back only the
+ // ones we know should force a new resource.
+ for k, attr := range result2.Attributes {
+ if attr == nil {
+ continue
+ }
+
+ if attr.RequiresNew {
+ attr.RequiresNew = false
+ }
+
+ if s != nil {
+ attr.Old = s.Attributes[k]
+ }
+ }
+
+ // Now copy in all the requires new diffs...
+ for k, attr := range result.Attributes {
+ if attr == nil {
+ continue
+ }
+
+ newAttr, ok := result2.Attributes[k]
+ if !ok {
+ newAttr = attr
+ }
+
+ if attr.RequiresNew {
+ newAttr.RequiresNew = true
+ }
+
+ result2.Attributes[k] = newAttr
+ }
+
+ // And set the diff!
+ result = result2
+ }
+
+ // Remove any nil diffs just to keep things clean
+ for k, v := range result.Attributes {
+ if v == nil {
+ delete(result.Attributes, k)
+ }
+ }
+
+ // Go through and detect all of the ComputedWhens now that we've
+ // finished the diff.
+ // TODO
+
+ if result.Empty() {
+ // If we don't have any diff elements, just return nil
+ return nil, nil
+ }
+
+ return result, nil
+}
+
+// Input implements the terraform.ResourceProvider method by asking
+// for input for required configuration keys that don't have a value.
+func (m schemaMap) Input(
+ input terraform.UIInput,
+ c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := m[k]
+
+ // Skip things that don't require config, if that is even valid
+ // for a provider schema.
+ // Required XOR Optional must always be true to validate, so we only
+ // need to check one.
+ if v.Optional {
+ continue
+ }
+
+ // Deprecated fields should never prompt
+ if v.Deprecated != "" {
+ continue
+ }
+
+ // Skip things that have a value of some sort already
+ if _, ok := c.Raw[k]; ok {
+ continue
+ }
+
+ // Skip if it has a default value
+ defaultValue, err := v.DefaultValue()
+ if err != nil {
+ return nil, fmt.Errorf("%s: error loading default: %s", k, err)
+ }
+ if defaultValue != nil {
+ continue
+ }
+
+ var value interface{}
+ switch v.Type {
+ case TypeBool, TypeInt, TypeFloat, TypeSet, TypeList:
+ continue
+ case TypeString:
+ value, err = m.inputString(input, k, v)
+ default:
+ panic(fmt.Sprintf("Unknown type for input: %#v", v.Type))
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf(
+ "%s: %s", k, err)
+ }
+
+ c.Config[k] = value
+ }
+
+ return c, nil
+}
+
+// Validate validates the configuration against this schema mapping.
+func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ return m.validateObject("", m, c)
+}
+
+// InternalValidate validates the format of this schema. This should be called
+// from a unit test (and not in user-path code) to verify that a schema
+// is properly built.
+func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error {
+ if topSchemaMap == nil {
+ topSchemaMap = m
+ }
+ for k, v := range m {
+ if v.Type == TypeInvalid {
+ return fmt.Errorf("%s: Type must be specified", k)
+ }
+
+ if v.Optional && v.Required {
+ return fmt.Errorf("%s: Optional or Required must be set, not both", k)
+ }
+
+ if v.Required && v.Computed {
+ return fmt.Errorf("%s: Cannot be both Required and Computed", k)
+ }
+
+ if !v.Required && !v.Optional && !v.Computed {
+ return fmt.Errorf("%s: One of optional, required, or computed must be set", k)
+ }
+
+ if v.Computed && v.Default != nil {
+ return fmt.Errorf("%s: Default must be nil if computed", k)
+ }
+
+ if v.Required && v.Default != nil {
+ return fmt.Errorf("%s: Default cannot be set with Required", k)
+ }
+
+ if len(v.ComputedWhen) > 0 && !v.Computed {
+ return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k)
+ }
+
+ if len(v.ConflictsWith) > 0 && v.Required {
+ return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k)
+ }
+
+ if len(v.ConflictsWith) > 0 {
+ for _, key := range v.ConflictsWith {
+ parts := strings.Split(key, ".")
+ sm := topSchemaMap
+ var target *Schema
+ for _, part := range parts {
+ // Skip index fields
+ if _, err := strconv.Atoi(part); err == nil {
+ continue
+ }
+
+ var ok bool
+ if target, ok = sm[part]; !ok {
+ return fmt.Errorf("%s: ConflictsWith references unknown attribute (%s)", k, key)
+ }
+
+ if subResource, ok := target.Elem.(*Resource); ok {
+ sm = schemaMap(subResource.Schema)
+ }
+ }
+ if target == nil {
+ return fmt.Errorf("%s: ConflictsWith cannot find target attribute (%s), sm: %#v", k, key, sm)
+ }
+ if target.Required {
+ return fmt.Errorf("%s: ConflictsWith cannot contain Required attribute (%s)", k, key)
+ }
+
+ if len(target.ComputedWhen) > 0 {
+ return fmt.Errorf("%s: ConflictsWith cannot contain Computed(When) attribute (%s)", k, key)
+ }
+ }
+ }
+
+ if v.Type == TypeList || v.Type == TypeSet {
+ if v.Elem == nil {
+ return fmt.Errorf("%s: Elem must be set for lists", k)
+ }
+
+ if v.Default != nil {
+ return fmt.Errorf("%s: Default is not valid for lists or sets", k)
+ }
+
+ if v.Type != TypeSet && v.Set != nil {
+ return fmt.Errorf("%s: Set can only be set for TypeSet", k)
+ }
+
+ switch t := v.Elem.(type) {
+ case *Resource:
+ if err := t.InternalValidate(topSchemaMap, true); err != nil {
+ return err
+ }
+ case *Schema:
+ bad := t.Computed || t.Optional || t.Required
+ if bad {
+ return fmt.Errorf(
+ "%s: Elem must have only Type set", k)
+ }
+ }
+ } else {
+ if v.MaxItems > 0 || v.MinItems > 0 {
+ return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k)
+ }
+ }
+
+ // Computed-only field
+ if v.Computed && !v.Optional {
+ if v.ValidateFunc != nil {
+ return fmt.Errorf("%s: ValidateFunc is for validating user input, "+
+ "there's nothing to validate on computed-only field", k)
+ }
+ if v.DiffSuppressFunc != nil {
+ return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+
+ " between config and state representation. "+
+ "There is no config for computed-only field, nothing to compare.", k)
+ }
+ }
+
+ if v.ValidateFunc != nil {
+ switch v.Type {
+ case TypeList, TypeSet:
+ return fmt.Errorf("ValidateFunc is not yet supported on lists or sets.")
+ }
+ }
+ }
+
+ return nil
+}
+
+func (m schemaMap) diff(
+ k string,
+ schema *Schema,
+ diff *terraform.InstanceDiff,
+ d *ResourceData,
+ all bool) error {
+
+ unsuppressedDiff := new(terraform.InstanceDiff)
+ unsuppressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff)
+
+ var err error
+ switch schema.Type {
+ case TypeBool, TypeInt, TypeFloat, TypeString:
+ err = m.diffString(k, schema, unsuppressedDiff, d, all)
+ case TypeList:
+ err = m.diffList(k, schema, unsuppressedDiff, d, all)
+ case TypeMap:
+ err = m.diffMap(k, schema, unsuppressedDiff, d, all)
+ case TypeSet:
+ err = m.diffSet(k, schema, unsuppressedDiff, d, all)
+ default:
+ err = fmt.Errorf("%s: unknown type %#v", k, schema.Type)
+ }
+
+ for attrK, attrV := range unsuppressedDiff.Attributes {
+ if schema.DiffSuppressFunc != nil &&
+ attrV != nil &&
+ schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, d) {
+ continue
+ }
+
+ diff.Attributes[attrK] = attrV
+ }
+
+ return err
+}
+
+func (m schemaMap) diffList(
+ k string,
+ schema *Schema,
+ diff *terraform.InstanceDiff,
+ d *ResourceData,
+ all bool) error {
+ o, n, _, computedList := d.diffChange(k)
+ if computedList {
+ n = nil
+ }
+ nSet := n != nil
+
+ // If we have an old value, no new value is set, the list will not
+ // be computed once all variables can be interpolated, and the
+ // schema is computed, then nothing has changed.
+ if o != nil && n == nil && !computedList && schema.Computed {
+ return nil
+ }
+
+ if o == nil {
+ o = []interface{}{}
+ }
+ if n == nil {
+ n = []interface{}{}
+ }
+ if s, ok := o.(*Set); ok {
+ o = s.List()
+ }
+ if s, ok := n.(*Set); ok {
+ n = s.List()
+ }
+ os := o.([]interface{})
+ vs := n.([]interface{})
+
+ // If the new value was set, and the two are equal, then we're done.
+ // We have to do this check here because two equal sets may not be
+ // reflect.DeepEqual until both are converted to []interface{}.
+ if !all && nSet && reflect.DeepEqual(os, vs) {
+ return nil
+ }
+
+ // Get the counts
+ oldLen := len(os)
+ newLen := len(vs)
+ oldStr := strconv.FormatInt(int64(oldLen), 10)
+
+ // If the whole list is computed, then say that the # is computed
+ if computedList {
+ diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{
+ Old: oldStr,
+ NewComputed: true,
+ RequiresNew: schema.ForceNew,
+ }
+ return nil
+ }
+
+ // If the counts are not the same, then record that diff
+ changed := oldLen != newLen
+ computed := oldLen == 0 && newLen == 0 && schema.Computed
+ if changed || computed || all {
+ countSchema := &Schema{
+ Type: TypeInt,
+ Computed: schema.Computed,
+ ForceNew: schema.ForceNew,
+ }
+
+ newStr := ""
+ if !computed {
+ newStr = strconv.FormatInt(int64(newLen), 10)
+ } else {
+ oldStr = ""
+ }
+
+ diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: oldStr,
+ New: newStr,
+ })
+ }
+
+ // Figure out the maximum
+ maxLen := oldLen
+ if newLen > maxLen {
+ maxLen = newLen
+ }
+
+ switch t := schema.Elem.(type) {
+ case *Resource:
+ // This is a complex resource
+ for i := 0; i < maxLen; i++ {
+ for k2, schema := range t.Schema {
+ subK := fmt.Sprintf("%s.%d.%s", k, i, k2)
+ err := m.diff(subK, schema, diff, d, all)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ case *Schema:
+ // Copy the schema so that we can set Computed/ForceNew from
+ // the parent schema (the TypeList).
+ t2 := *t
+ t2.ForceNew = schema.ForceNew
+
+ // This is just a primitive element, so go through each and
+ // just diff each.
+ for i := 0; i < maxLen; i++ {
+ subK := fmt.Sprintf("%s.%d", k, i)
+ err := m.diff(subK, &t2, diff, d, all)
+ if err != nil {
+ return err
+ }
+ }
+ default:
+ return fmt.Errorf("%s: unknown element type (internal)", k)
+ }
+
+ return nil
+}
+
+func (m schemaMap) diffMap(
+ k string,
+ schema *Schema,
+ diff *terraform.InstanceDiff,
+ d *ResourceData,
+ all bool) error {
+ prefix := k + "."
+
+ // First get all the values from the state
+ var stateMap, configMap map[string]string
+ o, n, _, nComputed := d.diffChange(k)
+ if err := mapstructure.WeakDecode(o, &stateMap); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+ if err := mapstructure.WeakDecode(n, &configMap); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+
+ // Keep track of whether the state _exists_ at all prior to clearing it
+ stateExists := o != nil
+
+ // Delete any count values, since we don't use those
+ delete(configMap, "%")
+ delete(stateMap, "%")
+
+ // Check if the number of elements has changed.
+ oldLen, newLen := len(stateMap), len(configMap)
+ changed := oldLen != newLen
+ if oldLen != 0 && newLen == 0 && schema.Computed {
+ changed = false
+ }
+
+ // It is computed if we have no old value, no new value, the schema
+ // says it is computed, and it didn't exist in the state before. The
+ // last point means: if it existed in the state, even empty, then it
+ // has already been computed.
+ computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists
+
+ // If the count has changed or we're computed, then add a diff for the
+ // count. "nComputed" means that the new value _contains_ a value that
+ // is computed. We don't do granular diffs for this yet, so we mark the
+ // whole map as computed.
+ if changed || computed || nComputed {
+ countSchema := &Schema{
+ Type: TypeInt,
+ Computed: schema.Computed || nComputed,
+ ForceNew: schema.ForceNew,
+ }
+
+ oldStr := strconv.FormatInt(int64(oldLen), 10)
+ newStr := ""
+ if !computed && !nComputed {
+ newStr = strconv.FormatInt(int64(newLen), 10)
+ } else {
+ oldStr = ""
+ }
+
+ diff.Attributes[k+".%"] = countSchema.finalizeDiff(
+ &terraform.ResourceAttrDiff{
+ Old: oldStr,
+ New: newStr,
+ },
+ )
+ }
+
+ // If the new map is nil and we're computed, then ignore it.
+ if n == nil && schema.Computed {
+ return nil
+ }
+
+ // Now we compare, preferring values from the config map
+ for k, v := range configMap {
+ old, ok := stateMap[k]
+ delete(stateMap, k)
+
+ if old == v && ok && !all {
+ continue
+ }
+
+ diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: old,
+ New: v,
+ })
+ }
+ for k, v := range stateMap {
+ diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: v,
+ NewRemoved: true,
+ })
+ }
+
+ return nil
+}
+
+func (m schemaMap) diffSet(
+ k string,
+ schema *Schema,
+ diff *terraform.InstanceDiff,
+ d *ResourceData,
+ all bool) error {
+
+ o, n, _, computedSet := d.diffChange(k)
+ if computedSet {
+ n = nil
+ }
+ nSet := n != nil
+
+ // If we have an old value, no new value is set, the set will not
+ // be computed once all variables can be interpolated, and the
+ // schema is computed, then nothing has changed.
+ if o != nil && n == nil && !computedSet && schema.Computed {
+ return nil
+ }
+
+ if o == nil {
+ o = schema.ZeroValue().(*Set)
+ }
+ if n == nil {
+ n = schema.ZeroValue().(*Set)
+ }
+ os := o.(*Set)
+ ns := n.(*Set)
+
+ // If the new value was set, compare the listCode's to determine if
+ // the two are equal. Comparing listCode's instead of the actual values
+ // is needed because there could be computed values in the set which
+ // would result in false positives while comparing.
+ if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) {
+ return nil
+ }
+
+ // Get the counts
+ oldLen := os.Len()
+ newLen := ns.Len()
+ oldStr := strconv.Itoa(oldLen)
+ newStr := strconv.Itoa(newLen)
+
+ // Build a schema for our count
+ countSchema := &Schema{
+ Type: TypeInt,
+ Computed: schema.Computed,
+ ForceNew: schema.ForceNew,
+ }
+
+ // If the set is computed, then say that the # is computed
+ if computedSet || (schema.Computed && !nSet) {
+ // If # already exists, equals 0 and no new set is supplied, there
+ // is nothing to record in the diff
+ count, ok := d.GetOk(k + ".#")
+ if ok && count.(int) == 0 && !nSet && !computedSet {
+ return nil
+ }
+
+ // Set the count but make sure that if # does not exist, we don't
+ // use the zeroed value
+ countStr := strconv.Itoa(count.(int))
+ if !ok {
+ countStr = ""
+ }
+
+ diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: countStr,
+ NewComputed: true,
+ })
+ return nil
+ }
+
+ // If the counts are not the same, then record that diff
+ changed := oldLen != newLen
+ if changed || all {
+ diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: oldStr,
+ New: newStr,
+ })
+ }
+
+ // Build the list of codes that will make up our set. This is the
+ // removed codes as well as all the codes in the new codes.
+ codes := make([][]string, 2)
+ codes[0] = os.Difference(ns).listCode()
+ codes[1] = ns.listCode()
+ for _, list := range codes {
+ for _, code := range list {
+ switch t := schema.Elem.(type) {
+ case *Resource:
+ // This is a complex resource
+ for k2, schema := range t.Schema {
+ subK := fmt.Sprintf("%s.%s.%s", k, code, k2)
+ err := m.diff(subK, schema, diff, d, true)
+ if err != nil {
+ return err
+ }
+ }
+ case *Schema:
+ // Copy the schema so that we can set Computed/ForceNew from
+ // the parent schema (the TypeSet).
+ t2 := *t
+ t2.ForceNew = schema.ForceNew
+
+ // This is just a primitive element, so go through each and
+ // just diff each.
+ subK := fmt.Sprintf("%s.%s", k, code)
+ err := m.diff(subK, &t2, diff, d, true)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("%s: unknown element type (internal)", k)
+ }
+ }
+ }
+
+ return nil
+}
+
+func (m schemaMap) diffString(
+ k string,
+ schema *Schema,
+ diff *terraform.InstanceDiff,
+ d *ResourceData,
+ all bool) error {
+ var originalN interface{}
+ var os, ns string
+ o, n, _, computed := d.diffChange(k)
+ if schema.StateFunc != nil && n != nil {
+ originalN = n
+ n = schema.StateFunc(n)
+ }
+ nraw := n
+ if nraw == nil && o != nil {
+ nraw = schema.Type.Zero()
+ }
+ if err := mapstructure.WeakDecode(o, &os); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+ if err := mapstructure.WeakDecode(nraw, &ns); err != nil {
+ return fmt.Errorf("%s: %s", k, err)
+ }
+
+ if os == ns && !all {
+ // They're the same value. If the old value is not blank or we
+ // have an ID, then return right away since we're already set up.
+ if os != "" || d.Id() != "" {
+ return nil
+ }
+
+ // Otherwise, only continue if we're computed
+ if !schema.Computed && !computed {
+ return nil
+ }
+ }
+
+ removed := false
+ if o != nil && n == nil {
+ removed = true
+ }
+ if removed && schema.Computed {
+ return nil
+ }
+
+ diff.Attributes[k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
+ Old: os,
+ New: ns,
+ NewExtra: originalN,
+ NewRemoved: removed,
+ NewComputed: computed,
+ })
+
+ return nil
+}
+
+func (m schemaMap) inputString(
+ input terraform.UIInput,
+ k string,
+ schema *Schema) (interface{}, error) {
+ result, err := input.Input(&terraform.InputOpts{
+ Id: k,
+ Query: k,
+ Description: schema.Description,
+ Default: schema.InputDefault,
+ })
+
+ return result, err
+}
+
+func (m schemaMap) validate(
+ k string,
+ schema *Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+ raw, ok := c.Get(k)
+ if !ok && schema.DefaultFunc != nil {
+ // We have a dynamic default. Check if we have a value.
+ var err error
+ raw, err = schema.DefaultFunc()
+ if err != nil {
+ return nil, []error{fmt.Errorf(
+ "%q, error loading default: %s", k, err)}
+ }
+
+ // We're okay as long as we had a value set
+ ok = raw != nil
+ }
+ if !ok {
+ if schema.Required {
+ return nil, []error{fmt.Errorf(
+ "%q: required field is not set", k)}
+ }
+
+ return nil, nil
+ }
+
+ if !schema.Required && !schema.Optional {
+ // This is a computed-only field
+ return nil, []error{fmt.Errorf(
+ "%q: this field cannot be set", k)}
+ }
+
+ err := m.validateConflictingAttributes(k, schema, c)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ return m.validateType(k, raw, schema, c)
+}
+
+func (m schemaMap) validateConflictingAttributes(
+ k string,
+ schema *Schema,
+ c *terraform.ResourceConfig) error {
+
+ if len(schema.ConflictsWith) == 0 {
+ return nil
+ }
+
+ for _, conflicting_key := range schema.ConflictsWith {
+ if value, ok := c.Get(conflicting_key); ok {
+ return fmt.Errorf(
+ "%q: conflicts with %s (%#v)", k, conflicting_key, value)
+ }
+ }
+
+ return nil
+}
+
+func (m schemaMap) validateList(
+ k string,
+ raw interface{},
+ schema *Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+ // We use reflection to verify the slice because you can't
+ // cast to []interface{} unless the slice is exactly that type.
+ rawV := reflect.ValueOf(raw)
+
+ // If we support promotion and the raw value isn't a slice, wrap
+ // it in []interface{} and check again.
+ if schema.PromoteSingle && rawV.Kind() != reflect.Slice {
+ raw = []interface{}{raw}
+ rawV = reflect.ValueOf(raw)
+ }
+
+ if rawV.Kind() != reflect.Slice {
+ return nil, []error{fmt.Errorf(
+ "%s: should be a list", k)}
+ }
+
+ // Validate length
+ if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems {
+ return nil, []error{fmt.Errorf(
+ "%s: attribute supports %d item maximum, config has %d declared", k, schema.MaxItems, rawV.Len())}
+ }
+
+ if schema.MinItems > 0 && rawV.Len() < schema.MinItems {
+ return nil, []error{fmt.Errorf(
+ "%s: attribute supports %d item as a minimum, config has %d declared", k, schema.MinItems, rawV.Len())}
+ }
+
+ // Now build the []interface{}
+ raws := make([]interface{}, rawV.Len())
+ for i := range raws {
+ raws[i] = rawV.Index(i).Interface()
+ }
+
+ var ws []string
+ var es []error
+ for i, raw := range raws {
+ key := fmt.Sprintf("%s.%d", k, i)
+
+ // Reify the key value from the ResourceConfig.
+ // If the list was computed we have all raw values, but some of these
+ // may be known in the config, and aren't individually marked as Computed.
+ if r, ok := c.Get(key); ok {
+ raw = r
+ }
+
+ var ws2 []string
+ var es2 []error
+ switch t := schema.Elem.(type) {
+ case *Resource:
+ // This is a sub-resource
+ ws2, es2 = m.validateObject(key, t.Schema, c)
+ case *Schema:
+ ws2, es2 = m.validateType(key, raw, t, c)
+ }
+
+ if len(ws2) > 0 {
+ ws = append(ws, ws2...)
+ }
+ if len(es2) > 0 {
+ es = append(es, es2...)
+ }
+ }
+
+ return ws, es
+}
+
+func (m schemaMap) validateMap(
+ k string,
+ raw interface{},
+ schema *Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+ // We use reflection to inspect the value because you can't
+ // cast to map[string]interface{} unless it is exactly that type.
+ rawV := reflect.ValueOf(raw)
+ switch rawV.Kind() {
+ case reflect.String:
+ // If raw and reified are equal, this is a string and should
+ // be rejected.
+ reified, reifiedOk := c.Get(k)
+ if reifiedOk && raw == reified && !c.IsComputed(k) {
+ return nil, []error{fmt.Errorf("%s: should be a map", k)}
+ }
+ // Otherwise it's likely raw is an interpolation.
+ return nil, nil
+ case reflect.Map:
+ case reflect.Slice:
+ default:
+ return nil, []error{fmt.Errorf("%s: should be a map", k)}
+ }
+
+ // If it is not a slice, validate directly
+ if rawV.Kind() != reflect.Slice {
+ mapIface := rawV.Interface()
+ if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 {
+ return nil, errs
+ }
+ if schema.ValidateFunc != nil {
+ return schema.ValidateFunc(mapIface, k)
+ }
+ return nil, nil
+ }
+
+ // It is a slice, verify that all the elements are maps
+ raws := make([]interface{}, rawV.Len())
+ for i := range raws {
+ raws[i] = rawV.Index(i).Interface()
+ }
+
+ for _, raw := range raws {
+ v := reflect.ValueOf(raw)
+ if v.Kind() != reflect.Map {
+ return nil, []error{fmt.Errorf(
+ "%s: should be a map", k)}
+ }
+ mapIface := v.Interface()
+ if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 {
+ return nil, errs
+ }
+ }
+
+ if schema.ValidateFunc != nil {
+ validatableMap := make(map[string]interface{})
+ for _, raw := range raws {
+ for k, v := range raw.(map[string]interface{}) {
+ validatableMap[k] = v
+ }
+ }
+
+ return schema.ValidateFunc(validatableMap, k)
+ }
+
+ return nil, nil
+}
+
+func validateMapValues(k string, m map[string]interface{}, schema *Schema) ([]string, []error) {
+ for key, raw := range m {
+ valueType, err := getValueType(k, schema)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ switch valueType {
+ case TypeBool:
+ var n bool
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
+ }
+ case TypeInt:
+ var n int
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
+ }
+ case TypeFloat:
+ var n float64
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
+ }
+ case TypeString:
+ var n string
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
+ }
+ default:
+ panic(fmt.Sprintf("Unknown validation type: %#v", valueType))
+ }
+ }
+ return nil, nil
+}
+
+func getValueType(k string, schema *Schema) (ValueType, error) {
+ if schema.Elem == nil {
+ return TypeString, nil
+ }
+ if vt, ok := schema.Elem.(ValueType); ok {
+ return vt, nil
+ }
+
+ if s, ok := schema.Elem.(*Schema); ok {
+ if s.Elem == nil {
+ return TypeString, nil
+ }
+ if vt, ok := s.Elem.(ValueType); ok {
+ return vt, nil
+ }
+ }
+
+ if _, ok := schema.Elem.(*Resource); ok {
+ // TODO: We don't actually support this (yet)
+ // but silently pass the validation, until we decide
+ // how to handle nested structures in maps
+ return TypeString, nil
+ }
+ return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem)
+}
+
+func (m schemaMap) validateObject(
+ k string,
+ schema map[string]*Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+ raw, _ := c.GetRaw(k)
+ if _, ok := raw.(map[string]interface{}); !ok {
+ return nil, []error{fmt.Errorf(
+ "%s: expected object, got %s",
+ k, reflect.ValueOf(raw).Kind())}
+ }
+
+ var ws []string
+ var es []error
+ for subK, s := range schema {
+ key := subK
+ if k != "" {
+ key = fmt.Sprintf("%s.%s", k, subK)
+ }
+
+ ws2, es2 := m.validate(key, s, c)
+ if len(ws2) > 0 {
+ ws = append(ws, ws2...)
+ }
+ if len(es2) > 0 {
+ es = append(es, es2...)
+ }
+ }
+
+ // Detect any extra/unknown keys and report those as errors.
+ if m, ok := raw.(map[string]interface{}); ok {
+ for subk := range m {
+ if _, ok := schema[subk]; !ok {
+ if subk == TimeoutsConfigKey {
+ continue
+ }
+ es = append(es, fmt.Errorf(
+ "%s: invalid or unknown key: %s", k, subk))
+ }
+ }
+ }
+
+ return ws, es
+}
+
+func (m schemaMap) validatePrimitive(
+ k string,
+ raw interface{},
+ schema *Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+
+ // Catch if the user gave a complex type where a primitive was
+ // expected, so we can return a friendly error message that
+ // doesn't contain Go type system terminology.
+ switch reflect.ValueOf(raw).Type().Kind() {
+ case reflect.Slice:
+ return nil, []error{
+ fmt.Errorf("%s must be a single value, not a list", k),
+ }
+ case reflect.Map:
+ return nil, []error{
+ fmt.Errorf("%s must be a single value, not a map", k),
+ }
+ default: // ok
+ }
+
+ if c.IsComputed(k) {
+ // If the key is being computed, then it is not an error as
+ // long as it's not a slice or map.
+ return nil, nil
+ }
+
+ var decoded interface{}
+ switch schema.Type {
+ case TypeBool:
+ // Verify that we can parse this as the correct type
+ var n bool
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s: %s", k, err)}
+ }
+ decoded = n
+ case TypeInt:
+ // Verify that we can parse this as an int
+ var n int
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s: %s", k, err)}
+ }
+ decoded = n
+ case TypeFloat:
+ // Verify that we can parse this as a float
+ var n float64
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s: %s", k, err)}
+ }
+ decoded = n
+ case TypeString:
+ // Verify that we can parse this as a string
+ var n string
+ if err := mapstructure.WeakDecode(raw, &n); err != nil {
+ return nil, []error{fmt.Errorf("%s: %s", k, err)}
+ }
+ decoded = n
+ default:
+ panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type))
+ }
+
+ if schema.ValidateFunc != nil {
+ return schema.ValidateFunc(decoded, k)
+ }
+
+ return nil, nil
+}
+
+func (m schemaMap) validateType(
+ k string,
+ raw interface{},
+ schema *Schema,
+ c *terraform.ResourceConfig) ([]string, []error) {
+ var ws []string
+ var es []error
+ switch schema.Type {
+ case TypeSet, TypeList:
+ ws, es = m.validateList(k, raw, schema, c)
+ case TypeMap:
+ ws, es = m.validateMap(k, raw, schema, c)
+ default:
+ ws, es = m.validatePrimitive(k, raw, schema, c)
+ }
+
+ if schema.Deprecated != "" {
+ ws = append(ws, fmt.Sprintf(
+ "%q: [DEPRECATED] %s", k, schema.Deprecated))
+ }
+
+ if schema.Removed != "" {
+ es = append(es, fmt.Errorf(
+ "%q: [REMOVED] %s", k, schema.Removed))
+ }
+
+ return ws, es
+}
+
+// Zero returns the zero value for a type.
+func (t ValueType) Zero() interface{} {
+ switch t {
+ case TypeInvalid:
+ return nil
+ case TypeBool:
+ return false
+ case TypeInt:
+ return 0
+ case TypeFloat:
+ return 0.0
+ case TypeString:
+ return ""
+ case TypeList:
+ return []interface{}{}
+ case TypeMap:
+ return map[string]interface{}{}
+ case TypeSet:
+ return new(Set)
+ case typeObject:
+ return map[string]interface{}{}
+ default:
+ panic(fmt.Sprintf("unknown type %s", t))
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go b/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go
new file mode 100644
index 00000000..3eb2d007
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go
@@ -0,0 +1,122 @@
+package schema
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strconv"
+)
+
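+// SerializeValueForHash appends a deterministic serialization of the given
+// value to buf, interpreting it according to schema. Like
+// SerializeResourceForHash below, it exists to build hashing input for set
+// elements, and its output is not meant to be parsed back.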
+func SerializeValueForHash(buf *bytes.Buffer, val interface{}, schema *Schema) {
+ if val == nil {
+ buf.WriteRune(';')
+ return
+ }
+
+ switch schema.Type {
+ case TypeBool:
+ if val.(bool) {
+ buf.WriteRune('1')
+ } else {
+ buf.WriteRune('0')
+ }
+ case TypeInt:
+ buf.WriteString(strconv.Itoa(val.(int)))
+ case TypeFloat:
+ buf.WriteString(strconv.FormatFloat(val.(float64), 'g', -1, 64))
+ case TypeString:
+ buf.WriteString(val.(string))
+ case TypeList:
+ buf.WriteRune('(')
+ l := val.([]interface{})
+ for _, innerVal := range l {
+ serializeCollectionMemberForHash(buf, innerVal, schema.Elem)
+ }
+ buf.WriteRune(')')
+ case TypeMap:
+ m := val.(map[string]interface{})
+ var keys []string
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ buf.WriteRune('[')
+ for _, k := range keys {
+ innerVal := m[k]
+ if innerVal == nil {
+ continue
+ }
+ buf.WriteString(k)
+ buf.WriteRune(':')
+
+ switch innerVal := innerVal.(type) {
+ case int:
+ buf.WriteString(strconv.Itoa(innerVal))
+ case float64:
+ buf.WriteString(strconv.FormatFloat(innerVal, 'g', -1, 64))
+ case string:
+ buf.WriteString(innerVal)
+ default:
+ panic(fmt.Sprintf("unknown value type in TypeMap %T", innerVal))
+ }
+
+ buf.WriteRune(';')
+ }
+ buf.WriteRune(']')
+ case TypeSet:
+ buf.WriteRune('{')
+ s := val.(*Set)
+ for _, innerVal := range s.List() {
+ serializeCollectionMemberForHash(buf, innerVal, schema.Elem)
+ }
+ buf.WriteRune('}')
+ default:
+ panic("unknown schema type to serialize")
+ }
+ buf.WriteRune(';')
+}
+
+// SerializeResourceForHash appends a serialization of the given resource config
+// to the given buffer, guaranteeing deterministic results given the same value
+// and schema.
+//
+// Its primary purpose is as input into a hashing function in order
+// to hash complex substructures when used in sets, and so the serialization
+// is not reversible.
+func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Resource) {
+ sm := resource.Schema
+ m := val.(map[string]interface{})
+ var keys []string
+ for k := range sm {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ innerSchema := sm[k]
+ // Skip attributes that are not user-provided. Computed attributes
+ // do not contribute to the hash since their ultimate value cannot
+ // be known at plan/diff time.
+ if !(innerSchema.Required || innerSchema.Optional) {
+ continue
+ }
+
+ buf.WriteString(k)
+ buf.WriteRune(':')
+ innerVal := m[k]
+ SerializeValueForHash(buf, innerVal, innerSchema)
+ }
+}
+
+func serializeCollectionMemberForHash(buf *bytes.Buffer, val interface{}, elem interface{}) {
+ switch tElem := elem.(type) {
+ case *Schema:
+ SerializeValueForHash(buf, val, tElem)
+ case *Resource:
+ buf.WriteRune('<')
+ SerializeResourceForHash(buf, val, tElem)
+ buf.WriteString(">;")
+ default:
+ panic(fmt.Sprintf("invalid element type: %T", tElem))
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/set.go b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
new file mode 100644
index 00000000..de05f40e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
@@ -0,0 +1,209 @@
+package schema
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+
+ "github.com/hashicorp/terraform/helper/hashcode"
+)
+
+// HashString hashes strings. If you want a Set of strings, this is the
+// SchemaSetFunc you want.
+func HashString(v interface{}) int {
+ return hashcode.String(v.(string))
+}
+
+// HashResource hashes complex structures that are described using
+// a *Resource. This is the default set implementation used when a set's
+// element type is a full resource.
+func HashResource(resource *Resource) SchemaSetFunc {
+ return func(v interface{}) int {
+ var buf bytes.Buffer
+ SerializeResourceForHash(&buf, v, resource)
+ return hashcode.String(buf.String())
+ }
+}
+
+// HashSchema hashes values that are described using a *Schema. This is the
+// default set implementation used when a set's element type is a single
+// schema.
+func HashSchema(schema *Schema) SchemaSetFunc {
+ return func(v interface{}) int {
+ var buf bytes.Buffer
+ SerializeValueForHash(&buf, v, schema)
+ return hashcode.String(buf.String())
+ }
+}
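+
+// An illustrative sketch (not part of the upstream source): a set of
+// strings pairs a string element with HashString as its SchemaSetFunc.
+var _ = &Schema{
+ Type: TypeSet,
+ Elem: &Schema{Type: TypeString},
+ Set:  HashString,
+}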
+
+// Set is a set data structure that is returned for elements of type
+// TypeSet.
+type Set struct {
+ F SchemaSetFunc
+
+ m map[string]interface{}
+ once sync.Once
+}
+
+// NewSet is a convenience method for creating a new set with the given
+// items.
+func NewSet(f SchemaSetFunc, items []interface{}) *Set {
+ s := &Set{F: f}
+ for _, i := range items {
+ s.Add(i)
+ }
+
+ return s
+}
+
+// CopySet returns a copy of another set.
+func CopySet(otherSet *Set) *Set {
+ return NewSet(otherSet.F, otherSet.List())
+}
+
+// Add adds an item to the set if it isn't already in the set.
+func (s *Set) Add(item interface{}) {
+ s.add(item, false)
+}
+
+// Remove removes an item if it's already in the set. Idempotent.
+func (s *Set) Remove(item interface{}) {
+ s.remove(item)
+}
+
+// Contains checks if the set has the given item.
+func (s *Set) Contains(item interface{}) bool {
+ _, ok := s.m[s.hash(item)]
+ return ok
+}
+
+// Len returns the number of items in the set.
+func (s *Set) Len() int {
+ return len(s.m)
+}
+
+// List returns the elements of this set in slice format.
+//
+// The order of the returned elements is deterministic. Given the same
+// set, the order of this will always be the same.
+func (s *Set) List() []interface{} {
+ result := make([]interface{}, len(s.m))
+ for i, k := range s.listCode() {
+ result[i] = s.m[k]
+ }
+
+ return result
+}
+
+// Difference performs a set difference of the two sets, returning
+// a new third set that has only the elements unique to this set.
+func (s *Set) Difference(other *Set) *Set {
+ result := &Set{F: s.F}
+ result.once.Do(result.init)
+
+ for k, v := range s.m {
+ if _, ok := other.m[k]; !ok {
+ result.m[k] = v
+ }
+ }
+
+ return result
+}
+
+// Intersection performs the set intersection of the two sets
+// and returns a new third set.
+func (s *Set) Intersection(other *Set) *Set {
+ result := &Set{F: s.F}
+ result.once.Do(result.init)
+
+ for k, v := range s.m {
+ if _, ok := other.m[k]; ok {
+ result.m[k] = v
+ }
+ }
+
+ return result
+}
+
+// Union performs the set union of the two sets and returns a new third
+// set.
+func (s *Set) Union(other *Set) *Set {
+ result := &Set{F: s.F}
+ result.once.Do(result.init)
+
+ for k, v := range s.m {
+ result.m[k] = v
+ }
+ for k, v := range other.m {
+ result.m[k] = v
+ }
+
+ return result
+}
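+
+// exampleSetOps is an illustrative sketch (not part of the upstream
+// source) of the operations above: Union merges both sets, Difference
+// keeps elements unique to the receiver, and List returns elements in
+// deterministic hash-code order.
+func exampleSetOps() []interface{} {
+ a := NewSet(HashString, []interface{}{"x", "y"})
+ b := NewSet(HashString, []interface{}{"y", "z"})
+ return a.Union(b).Difference(b).List() // only "x" remains
+}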
+
+func (s *Set) Equal(raw interface{}) bool {
+ other, ok := raw.(*Set)
+ if !ok {
+ return false
+ }
+
+ return reflect.DeepEqual(s.m, other.m)
+}
+
+func (s *Set) GoString() string {
+ return fmt.Sprintf("*Set(%#v)", s.m)
+}
+
+func (s *Set) init() {
+ s.m = make(map[string]interface{})
+}
+
+func (s *Set) add(item interface{}, computed bool) string {
+ s.once.Do(s.init)
+
+ code := s.hash(item)
+ if computed {
+ code = "~" + code
+ }
+
+ if _, ok := s.m[code]; !ok {
+ s.m[code] = item
+ }
+
+ return code
+}
+
+func (s *Set) hash(item interface{}) string {
+ code := s.F(item)
+ // Always return a nonnegative hashcode.
+ if code < 0 {
+ code = -code
+ }
+ return strconv.Itoa(code)
+}
+
+func (s *Set) remove(item interface{}) string {
+ s.once.Do(s.init)
+
+ code := s.hash(item)
+ delete(s.m, code)
+
+ return code
+}
+
+func (s *Set) index(item interface{}) int {
+ return sort.SearchStrings(s.listCode(), s.hash(item))
+}
+
+func (s *Set) listCode() []string {
+ // Sort the hash codes so the order of the list is deterministic
+ keys := make([]string, 0, len(s.m))
+ for k := range s.m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
new file mode 100644
index 00000000..9765bdbc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
@@ -0,0 +1,30 @@
+package schema
+
+import (
+ "testing"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// TestResourceDataRaw creates a ResourceData from a raw configuration map.
+func TestResourceDataRaw(
+ t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData {
+ c, err := config.NewRawConfig(raw)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ sm := schemaMap(schema)
+ diff, err := sm.Diff(nil, terraform.NewResourceConfig(c))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ result, err := sm.Data(nil, diff)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ return result
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go
new file mode 100644
index 00000000..9286987d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go
@@ -0,0 +1,21 @@
+package schema
+
+//go:generate stringer -type=ValueType valuetype.go
+
+// ValueType is an enum of the type that can be represented by a schema.
+type ValueType int
+
+const (
+ TypeInvalid ValueType = iota
+ TypeBool
+ TypeInt
+ TypeFloat
+ TypeString
+ TypeList
+ TypeMap
+ TypeSet
+ typeObject
+)
+
+// NOTE: ValueType has more functions defined on it in schema.go. We can't
+// put them here because we reference other files.
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
new file mode 100644
index 00000000..1610cec2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=ValueType valuetype.go"; DO NOT EDIT.
+
+package schema
+
+import "fmt"
+
+const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject"
+
+var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77}
+
+func (i ValueType) String() string {
+ if i < 0 || i >= ValueType(len(_ValueType_index)-1) {
+ return fmt.Sprintf("ValueType(%d)", i)
+ }
+ return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]]
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go
new file mode 100644
index 00000000..7edd5e75
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go
@@ -0,0 +1,80 @@
+package shadow
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/mitchellh/reflectwalk"
+)
+
+// Close will close all shadow values within the given structure.
+//
+// This uses reflection to walk the structure, find all shadow elements,
+// and close them. Currently this will only find struct fields that are
+// shadow values, and not slice elements, etc.
+func Close(v interface{}) error {
+ // We require a pointer so we can address the internal fields
+ val := reflect.ValueOf(v)
+ if val.Kind() != reflect.Ptr {
+ return fmt.Errorf("value must be a pointer")
+ }
+
+ // Walk and close
+ var w closeWalker
+ if err := reflectwalk.Walk(v, &w); err != nil {
+ return err
+ }
+
+ return w.Err
+}
+
+type closeWalker struct {
+ Err error
+}
+
+func (w *closeWalker) Struct(reflect.Value) error {
+ // Do nothing. We implement this for reflectwalk.StructWalker
+ return nil
+}
+
+func (w *closeWalker) StructField(f reflect.StructField, v reflect.Value) error {
+ // Not sure why this would happen, but let's avoid some panics
+ if !v.IsValid() {
+ return nil
+ }
+
+ // PkgPath is empty for exported fields, so skip unexported fields
+ if f.PkgPath != "" {
+ return nil
+ }
+
+ // Verify the io.Closer is in this package
+ typ := v.Type()
+ if typ.PkgPath() != "github.com/hashicorp/terraform/helper/shadow" {
+ return nil
+ }
+
+ // We're looking for an io.Closer
+ raw := v.Interface()
+ if raw == nil {
+ return nil
+ }
+
+ closer, ok := raw.(io.Closer)
+ if !ok && v.CanAddr() {
+ closer, ok = v.Addr().Interface().(io.Closer)
+ }
+ if !ok {
+ return reflectwalk.SkipEntry
+ }
+
+ // Close it
+ if err := closer.Close(); err != nil {
+ w.Err = multierror.Append(w.Err, err)
+ }
+
+ // Don't go into the struct field
+ return reflectwalk.SkipEntry
+}
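+
+// exampleClose is an illustrative sketch (not part of the upstream
+// source): Close walks the struct via reflection and closes the shadow
+// Value field, so any blocked Value() call returns ErrClosed.
+func exampleClose() error {
+ c := &struct{ V Value }{}
+ return Close(c)
+}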
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go
new file mode 100644
index 00000000..4223e925
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go
@@ -0,0 +1,128 @@
+package shadow
+
+import (
+ "sync"
+)
+
+// ComparedValue is a struct that finds a value by comparing some key
+// to the list of stored values. This is useful when there is no easy
+// uniquely identifying key that works in a map (for that, use KeyedValue).
+//
+// ComparedValue is very expensive, relative to other Value types. Try to
+// limit the number of values stored in a ComparedValue by potentially
+// nesting it within a KeyedValue (a keyed value points to a compared value,
+// for example).
+type ComparedValue struct {
+ // Func is a function that is given the lookup key and a single
+ // stored value. If it matches, it returns true.
+ Func func(k, v interface{}) bool
+
+ lock sync.Mutex
+ once sync.Once
+ closed bool
+ values []interface{}
+ waiters map[interface{}]*Value
+}
+
+// Close closes the value. This can never fail. For a definition of
+// "close" see the ErrClosed docs.
+func (w *ComparedValue) Close() error {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ // Set closed to true always
+ w.closed = true
+
+ // For all waiters, complete with ErrClosed
+ for k, val := range w.waiters {
+ val.SetValue(ErrClosed)
+ delete(w.waiters, k)
+ }
+
+ return nil
+}
+
+// Value returns the value that was set for the given key, or blocks
+// until one is available.
+func (w *ComparedValue) Value(k interface{}) interface{} {
+ v, val := w.valueWaiter(k)
+ if val == nil {
+ return v
+ }
+
+ return val.Value()
+}
+
+// ValueOk gets the value for the given key, returning immediately if the
+// value doesn't exist. The second return argument is true if the value exists.
+func (w *ComparedValue) ValueOk(k interface{}) (interface{}, bool) {
+ v, val := w.valueWaiter(k)
+ return v, val == nil
+}
+
+func (w *ComparedValue) SetValue(v interface{}) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.once.Do(w.init)
+
+ // Check if we already have this exact value (by simply comparing
+ // with == directly). If we do, then we don't insert it again.
+ found := false
+ for _, v2 := range w.values {
+ if v == v2 {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ // Set the value, always
+ w.values = append(w.values, v)
+ }
+
+ // Go through the waiters
+ for k, val := range w.waiters {
+ if w.Func(k, v) {
+ val.SetValue(v)
+ delete(w.waiters, k)
+ }
+ }
+}
+
+func (w *ComparedValue) valueWaiter(k interface{}) (interface{}, *Value) {
+ w.lock.Lock()
+ w.once.Do(w.init)
+
+ // Look for a pre-existing value
+ for _, v := range w.values {
+ if w.Func(k, v) {
+ w.lock.Unlock()
+ return v, nil
+ }
+ }
+
+ // If we're closed, return that
+ if w.closed {
+ w.lock.Unlock()
+ return ErrClosed, nil
+ }
+
+ // Pre-existing value doesn't exist, create a waiter
+ val := w.waiters[k]
+ if val == nil {
+ val = new(Value)
+ w.waiters[k] = val
+ }
+ w.lock.Unlock()
+
+ // Return the waiter
+ return nil, val
+}
+
+// Must be called with w.lock held.
+func (w *ComparedValue) init() {
+ w.waiters = make(map[interface{}]*Value)
+ if w.Func == nil {
+ w.Func = func(k, v interface{}) bool { return k == v }
+ }
+}
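+
+// exampleComparedValue is an illustrative sketch (not part of the
+// upstream source): with the default Func (k == v), a lookup for "a"
+// succeeds immediately once the value "a" has been stored.
+func exampleComparedValue() (interface{}, bool) {
+ var cv ComparedValue
+ cv.SetValue("a")
+ return cv.ValueOk("a") // ("a", true), no blocking
+}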
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go
new file mode 100644
index 00000000..432b0366
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go
@@ -0,0 +1,151 @@
+package shadow
+
+import (
+ "sync"
+)
+
+// KeyedValue is a struct that coordinates a value by key. If a value is
+// not available for a given key, it'll block until it is available.
+type KeyedValue struct {
+ lock sync.Mutex
+ once sync.Once
+ values map[string]interface{}
+ waiters map[string]*Value
+ closed bool
+}
+
+// Close closes the value. This can never fail. For a definition of
+// "close" see the ErrClosed docs.
+func (w *KeyedValue) Close() error {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ // Set closed to true always
+ w.closed = true
+
+ // For all waiters, complete with ErrClosed
+ for k, val := range w.waiters {
+ val.SetValue(ErrClosed)
+ delete(w.waiters, k)
+ }
+
+ return nil
+}
+
+// Value returns the value that was set for the given key, or blocks
+// until one is available.
+func (w *KeyedValue) Value(k string) interface{} {
+ w.lock.Lock()
+ v, val := w.valueWaiter(k)
+ w.lock.Unlock()
+
+ // If we have no waiter, then return the value
+ if val == nil {
+ return v
+ }
+
+ // We have a waiter, so wait
+ return val.Value()
+}
+
+// WaitForChange waits for the value with the given key to be set again.
+// If the key isn't set, it'll wait for an initial value. Note that while
+// it is called "WaitForChange", the value isn't guaranteed to _change_;
+// this will return when a SetValue is called for the given k.
+func (w *KeyedValue) WaitForChange(k string) interface{} {
+ w.lock.Lock()
+ w.once.Do(w.init)
+
+ // If we're closed, we're closed
+ if w.closed {
+ w.lock.Unlock()
+ return ErrClosed
+ }
+
+ // Check for an active waiter. If there isn't one, make it
+ val := w.waiters[k]
+ if val == nil {
+ val = new(Value)
+ w.waiters[k] = val
+ }
+ w.lock.Unlock()
+
+ // And wait
+ return val.Value()
+}
+
+// ValueOk gets the value for the given key, returning immediately if the
+// value doesn't exist. The second return argument is true if the value exists.
+func (w *KeyedValue) ValueOk(k string) (interface{}, bool) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ v, val := w.valueWaiter(k)
+ return v, val == nil
+}
+
+func (w *KeyedValue) SetValue(k string, v interface{}) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.setValue(k, v)
+}
+
+// Init will initialize the key to a given value only if the key has
+// not been set before. This is safe to call multiple times and in parallel.
+func (w *KeyedValue) Init(k string, v interface{}) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ // If we have a waiter, set the value.
+ _, val := w.valueWaiter(k)
+ if val != nil {
+ w.setValue(k, v)
+ }
+}
+
+// Must be called with w.lock held.
+func (w *KeyedValue) init() {
+ w.values = make(map[string]interface{})
+ w.waiters = make(map[string]*Value)
+}
+
+// setValue is like SetValue but assumes the lock is held.
+func (w *KeyedValue) setValue(k string, v interface{}) {
+ w.once.Do(w.init)
+
+ // Set the value, always
+ w.values[k] = v
+
+ // If we have a waiter, set it
+ if val, ok := w.waiters[k]; ok {
+ val.SetValue(v)
+ delete(w.waiters, k)
+ }
+}
+
+// valueWaiter gets the value or the Value waiter for a given key.
+//
+// This must be called with lock held.
+func (w *KeyedValue) valueWaiter(k string) (interface{}, *Value) {
+ w.once.Do(w.init)
+
+ // If we have this value already, return it
+ if v, ok := w.values[k]; ok {
+ return v, nil
+ }
+
+ // If we're closed, return that
+ if w.closed {
+ return ErrClosed, nil
+ }
+
+ // No pending value, check for a waiter
+ val := w.waiters[k]
+ if val == nil {
+ val = new(Value)
+ w.waiters[k] = val
+ }
+
+ // Return the waiter
+ return nil, val
+}
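+
+// exampleKeyedValue is an illustrative sketch (not part of the upstream
+// source): a value set for a key is returned immediately, while reading
+// a key that was never set would block until SetValue or Close.
+func exampleKeyedValue() interface{} {
+ var kv KeyedValue
+ kv.SetValue("id", 42)
+ return kv.Value("id") // 42, returns immediately
+}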
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go
new file mode 100644
index 00000000..0a43d4d4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go
@@ -0,0 +1,66 @@
+package shadow
+
+import (
+ "container/list"
+ "sync"
+)
+
+// OrderedValue is a struct that keeps track of values in the order they
+// are set. Each time Value is called, it returns the next pending value
+// in that order and then discards it.
+//
+// This is unlike Value that returns the same value once it is set.
+type OrderedValue struct {
+ lock sync.Mutex
+ values *list.List
+ waiters *list.List
+}
+
+// Value returns the next value that was set, or blocks until one
+// is received.
+func (w *OrderedValue) Value() interface{} {
+ w.lock.Lock()
+
+ // If we have a pending value already, use it
+ if w.values != nil && w.values.Len() > 0 {
+ front := w.values.Front()
+ w.values.Remove(front)
+ w.lock.Unlock()
+ return front.Value
+ }
+
+ // No pending value, create a waiter
+ if w.waiters == nil {
+ w.waiters = list.New()
+ }
+
+ var val Value
+ w.waiters.PushBack(&val)
+ w.lock.Unlock()
+
+ // Return the value once we have it
+ return val.Value()
+}
+
+// SetValue sets the latest value.
+func (w *OrderedValue) SetValue(v interface{}) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ // If we have a waiter, notify it
+ if w.waiters != nil && w.waiters.Len() > 0 {
+ front := w.waiters.Front()
+ w.waiters.Remove(front)
+
+ val := front.Value.(*Value)
+ val.SetValue(v)
+ return
+ }
+
+ // Add it to the list of values
+ if w.values == nil {
+ w.values = list.New()
+ }
+
+ w.values.PushBack(v)
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go
new file mode 100644
index 00000000..2413335b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go
@@ -0,0 +1,79 @@
+package shadow
+
+import (
+ "errors"
+ "sync"
+)
+
+// ErrClosed is returned by any closed values.
+//
+// A "closed value" is when the shadow has been notified that the real
+// side is complete and any blocking values will _never_ be satisfied
+// in the future. In this case, this error is returned. If a value is already
+// available, that is still returned.
+var ErrClosed = errors.New("shadow closed")
+
+// Value is a struct that coordinates a value between two
+// parallel routines. It is similar to atomic.Value, except that a call
+// to Value blocks until a value has been set.
+//
+// The Value can be closed with Close, which will cause any future
+// blocking operations to return immediately with ErrClosed.
+type Value struct {
+ lock sync.Mutex
+ cond *sync.Cond
+ value interface{}
+ valueSet bool
+}
+
+// Close closes the value. This can never fail. For a definition of
+// "close" see the struct docs.
+func (w *Value) Close() error {
+ w.lock.Lock()
+ set := w.valueSet
+ w.lock.Unlock()
+
+ // If we haven't set the value, set it
+ if !set {
+ w.SetValue(ErrClosed)
+ }
+
+ // Done
+ return nil
+}
+
+// Value returns the value that was set.
+func (w *Value) Value() interface{} {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ // If we already have a value just return
+ for !w.valueSet {
+ // No value, setup the condition variable if we have to
+ if w.cond == nil {
+ w.cond = sync.NewCond(&w.lock)
+ }
+
+ // Wait on it
+ w.cond.Wait()
+ }
+
+ // Return the value
+ return w.value
+}
+
+// SetValue sets the value.
+func (w *Value) SetValue(v interface{}) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ // Set the value
+ w.valueSet = true
+ w.value = v
+
+ // If we have a condition, clear it
+ if w.cond != nil {
+ w.cond.Broadcast()
+ w.cond = nil
+ }
+}
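+
+// exampleValue is an illustrative sketch (not part of the upstream
+// source): Value blocks until another goroutine delivers a value via
+// SetValue (or Close).
+func exampleValue() interface{} {
+ var v Value
+ go v.SetValue("done")
+ return v.Value() // blocks until the goroutine delivers "done"
+}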
diff --git a/vendor/github.com/hashicorp/terraform/plugin/plugin.go b/vendor/github.com/hashicorp/terraform/plugin/plugin.go
new file mode 100644
index 00000000..00fa7b29
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/plugin.go
@@ -0,0 +1,13 @@
+package plugin
+
+import (
+ "github.com/hashicorp/go-plugin"
+)
+
+// See serve.go for serving plugins
+
+// PluginMap should be used by clients for the map of plugins.
+var PluginMap = map[string]plugin.Plugin{
+ "provider": &ResourceProviderPlugin{},
+ "provisioner": &ResourceProvisionerPlugin{},
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
new file mode 100644
index 00000000..473f7860
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
@@ -0,0 +1,578 @@
+package plugin
+
+import (
+ "net/rpc"
+
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// ResourceProviderPlugin is the plugin.Plugin implementation.
+type ResourceProviderPlugin struct {
+ F func() terraform.ResourceProvider
+}
+
+func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) {
+ return &ResourceProviderServer{Broker: b, Provider: p.F()}, nil
+}
+
+func (p *ResourceProviderPlugin) Client(
+ b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
+ return &ResourceProvider{Broker: b, Client: c}, nil
+}
+
+// ResourceProvider is an implementation of terraform.ResourceProvider
+// that communicates over RPC.
+type ResourceProvider struct {
+ Broker *plugin.MuxBroker
+ Client *rpc.Client
+}
+
+func (p *ResourceProvider) Stop() error {
+ var resp ResourceProviderStopResponse
+ err := p.Client.Call("Plugin.Stop", new(interface{}), &resp)
+ if err != nil {
+ return err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return err
+}
+
+func (p *ResourceProvider) Input(
+ input terraform.UIInput,
+ c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+ id := p.Broker.NextId()
+ go p.Broker.AcceptAndServe(id, &UIInputServer{
+ UIInput: input,
+ })
+
+ var resp ResourceProviderInputResponse
+ args := ResourceProviderInputArgs{
+ InputId: id,
+ Config: c,
+ }
+
+ err := p.Client.Call("Plugin.Input", &args, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ return nil, err
+ }
+
+ return resp.Config, nil
+}
+
+func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ var resp ResourceProviderValidateResponse
+ args := ResourceProviderValidateArgs{
+ Config: c,
+ }
+
+ err := p.Client.Call("Plugin.Validate", &args, &resp)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ var errs []error
+ if len(resp.Errors) > 0 {
+ errs = make([]error, len(resp.Errors))
+ for i, err := range resp.Errors {
+ errs[i] = err
+ }
+ }
+
+ return resp.Warnings, errs
+}
+
+func (p *ResourceProvider) ValidateResource(
+ t string, c *terraform.ResourceConfig) ([]string, []error) {
+ var resp ResourceProviderValidateResourceResponse
+ args := ResourceProviderValidateResourceArgs{
+ Config: c,
+ Type: t,
+ }
+
+ err := p.Client.Call("Plugin.ValidateResource", &args, &resp)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ var errs []error
+ if len(resp.Errors) > 0 {
+ errs = make([]error, len(resp.Errors))
+ for i, err := range resp.Errors {
+ errs[i] = err
+ }
+ }
+
+ return resp.Warnings, errs
+}
+
+func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error {
+ var resp ResourceProviderConfigureResponse
+ err := p.Client.Call("Plugin.Configure", c, &resp)
+ if err != nil {
+ return err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return err
+}
+
+func (p *ResourceProvider) Apply(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
+ var resp ResourceProviderApplyResponse
+ args := &ResourceProviderApplyArgs{
+ Info: info,
+ State: s,
+ Diff: d,
+ }
+
+ err := p.Client.Call("Plugin.Apply", args, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return resp.State, err
+}
+
+func (p *ResourceProvider) Diff(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+ var resp ResourceProviderDiffResponse
+ args := &ResourceProviderDiffArgs{
+ Info: info,
+ State: s,
+ Config: c,
+ }
+ err := p.Client.Call("Plugin.Diff", args, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return resp.Diff, err
+}
+
+func (p *ResourceProvider) ValidateDataSource(
+ t string, c *terraform.ResourceConfig) ([]string, []error) {
+ var resp ResourceProviderValidateResourceResponse
+ args := ResourceProviderValidateResourceArgs{
+ Config: c,
+ Type: t,
+ }
+
+ err := p.Client.Call("Plugin.ValidateDataSource", &args, &resp)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ var errs []error
+ if len(resp.Errors) > 0 {
+ errs = make([]error, len(resp.Errors))
+ for i, err := range resp.Errors {
+ errs[i] = err
+ }
+ }
+
+ return resp.Warnings, errs
+}
+
+func (p *ResourceProvider) Refresh(
+ info *terraform.InstanceInfo,
+ s *terraform.InstanceState) (*terraform.InstanceState, error) {
+ var resp ResourceProviderRefreshResponse
+ args := &ResourceProviderRefreshArgs{
+ Info: info,
+ State: s,
+ }
+
+ err := p.Client.Call("Plugin.Refresh", args, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return resp.State, err
+}
+
+func (p *ResourceProvider) ImportState(
+ info *terraform.InstanceInfo,
+ id string) ([]*terraform.InstanceState, error) {
+ var resp ResourceProviderImportStateResponse
+ args := &ResourceProviderImportStateArgs{
+ Info: info,
+ Id: id,
+ }
+
+ err := p.Client.Call("Plugin.ImportState", args, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return resp.State, err
+}
+
+func (p *ResourceProvider) Resources() []terraform.ResourceType {
+ var result []terraform.ResourceType
+
+ err := p.Client.Call("Plugin.Resources", new(interface{}), &result)
+ if err != nil {
+ // TODO: panic, log, what?
+ return nil
+ }
+
+ return result
+}
+
+func (p *ResourceProvider) ReadDataDiff(
+ info *terraform.InstanceInfo,
+ c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+ var resp ResourceProviderReadDataDiffResponse
+ args := &ResourceProviderReadDataDiffArgs{
+ Info: info,
+ Config: c,
+ }
+
+ err := p.Client.Call("Plugin.ReadDataDiff", args, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return resp.Diff, err
+}
+
+func (p *ResourceProvider) ReadDataApply(
+ info *terraform.InstanceInfo,
+ d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
+ var resp ResourceProviderReadDataApplyResponse
+ args := &ResourceProviderReadDataApplyArgs{
+ Info: info,
+ Diff: d,
+ }
+
+ err := p.Client.Call("Plugin.ReadDataApply", args, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return resp.State, err
+}
+
+func (p *ResourceProvider) DataSources() []terraform.DataSource {
+ var result []terraform.DataSource
+
+ err := p.Client.Call("Plugin.DataSources", new(interface{}), &result)
+ if err != nil {
+ // TODO: panic, log, what?
+ return nil
+ }
+
+ return result
+}
+
+func (p *ResourceProvider) Close() error {
+ return p.Client.Close()
+}
+
+// ResourceProviderServer is a net/rpc compatible structure for serving
+// a ResourceProvider. This should not be used directly.
+type ResourceProviderServer struct {
+ Broker *plugin.MuxBroker
+ Provider terraform.ResourceProvider
+}
+
+type ResourceProviderStopResponse struct {
+ Error *plugin.BasicError
+}
+
+type ResourceProviderConfigureResponse struct {
+ Error *plugin.BasicError
+}
+
+type ResourceProviderInputArgs struct {
+ InputId uint32
+ Config *terraform.ResourceConfig
+}
+
+type ResourceProviderInputResponse struct {
+ Config *terraform.ResourceConfig
+ Error *plugin.BasicError
+}
+
+type ResourceProviderApplyArgs struct {
+ Info *terraform.InstanceInfo
+ State *terraform.InstanceState
+ Diff *terraform.InstanceDiff
+}
+
+type ResourceProviderApplyResponse struct {
+ State *terraform.InstanceState
+ Error *plugin.BasicError
+}
+
+type ResourceProviderDiffArgs struct {
+ Info *terraform.InstanceInfo
+ State *terraform.InstanceState
+ Config *terraform.ResourceConfig
+}
+
+type ResourceProviderDiffResponse struct {
+ Diff *terraform.InstanceDiff
+ Error *plugin.BasicError
+}
+
+type ResourceProviderRefreshArgs struct {
+ Info *terraform.InstanceInfo
+ State *terraform.InstanceState
+}
+
+type ResourceProviderRefreshResponse struct {
+ State *terraform.InstanceState
+ Error *plugin.BasicError
+}
+
+type ResourceProviderImportStateArgs struct {
+ Info *terraform.InstanceInfo
+ Id string
+}
+
+type ResourceProviderImportStateResponse struct {
+ State []*terraform.InstanceState
+ Error *plugin.BasicError
+}
+
+type ResourceProviderReadDataApplyArgs struct {
+ Info *terraform.InstanceInfo
+ Diff *terraform.InstanceDiff
+}
+
+type ResourceProviderReadDataApplyResponse struct {
+ State *terraform.InstanceState
+ Error *plugin.BasicError
+}
+
+type ResourceProviderReadDataDiffArgs struct {
+ Info *terraform.InstanceInfo
+ Config *terraform.ResourceConfig
+}
+
+type ResourceProviderReadDataDiffResponse struct {
+ Diff *terraform.InstanceDiff
+ Error *plugin.BasicError
+}
+
+type ResourceProviderValidateArgs struct {
+ Config *terraform.ResourceConfig
+}
+
+type ResourceProviderValidateResponse struct {
+ Warnings []string
+ Errors []*plugin.BasicError
+}
+
+type ResourceProviderValidateResourceArgs struct {
+ Config *terraform.ResourceConfig
+ Type string
+}
+
+type ResourceProviderValidateResourceResponse struct {
+ Warnings []string
+ Errors []*plugin.BasicError
+}
+
+func (s *ResourceProviderServer) Stop(
+ _ interface{},
+ reply *ResourceProviderStopResponse) error {
+ err := s.Provider.Stop()
+ *reply = ResourceProviderStopResponse{
+ Error: plugin.NewBasicError(err),
+ }
+
+ return nil
+}
+
+func (s *ResourceProviderServer) Input(
+ args *ResourceProviderInputArgs,
+ reply *ResourceProviderInputResponse) error {
+ conn, err := s.Broker.Dial(args.InputId)
+ if err != nil {
+ *reply = ResourceProviderInputResponse{
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+ }
+ client := rpc.NewClient(conn)
+ defer client.Close()
+
+ input := &UIInput{Client: client}
+
+ config, err := s.Provider.Input(input, args.Config)
+ *reply = ResourceProviderInputResponse{
+ Config: config,
+ Error: plugin.NewBasicError(err),
+ }
+
+ return nil
+}
+
+func (s *ResourceProviderServer) Validate(
+ args *ResourceProviderValidateArgs,
+ reply *ResourceProviderValidateResponse) error {
+ warns, errs := s.Provider.Validate(args.Config)
+ berrs := make([]*plugin.BasicError, len(errs))
+ for i, err := range errs {
+ berrs[i] = plugin.NewBasicError(err)
+ }
+ *reply = ResourceProviderValidateResponse{
+ Warnings: warns,
+ Errors: berrs,
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) ValidateResource(
+ args *ResourceProviderValidateResourceArgs,
+ reply *ResourceProviderValidateResourceResponse) error {
+ warns, errs := s.Provider.ValidateResource(args.Type, args.Config)
+ berrs := make([]*plugin.BasicError, len(errs))
+ for i, err := range errs {
+ berrs[i] = plugin.NewBasicError(err)
+ }
+ *reply = ResourceProviderValidateResourceResponse{
+ Warnings: warns,
+ Errors: berrs,
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) Configure(
+ config *terraform.ResourceConfig,
+ reply *ResourceProviderConfigureResponse) error {
+ err := s.Provider.Configure(config)
+ *reply = ResourceProviderConfigureResponse{
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) Apply(
+ args *ResourceProviderApplyArgs,
+ result *ResourceProviderApplyResponse) error {
+ state, err := s.Provider.Apply(args.Info, args.State, args.Diff)
+ *result = ResourceProviderApplyResponse{
+ State: state,
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) Diff(
+ args *ResourceProviderDiffArgs,
+ result *ResourceProviderDiffResponse) error {
+ diff, err := s.Provider.Diff(args.Info, args.State, args.Config)
+ *result = ResourceProviderDiffResponse{
+ Diff: diff,
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) Refresh(
+ args *ResourceProviderRefreshArgs,
+ result *ResourceProviderRefreshResponse) error {
+ newState, err := s.Provider.Refresh(args.Info, args.State)
+ *result = ResourceProviderRefreshResponse{
+ State: newState,
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) ImportState(
+ args *ResourceProviderImportStateArgs,
+ result *ResourceProviderImportStateResponse) error {
+ states, err := s.Provider.ImportState(args.Info, args.Id)
+ *result = ResourceProviderImportStateResponse{
+ State: states,
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) Resources(
+ nothing interface{},
+ result *[]terraform.ResourceType) error {
+ *result = s.Provider.Resources()
+ return nil
+}
+
+func (s *ResourceProviderServer) ValidateDataSource(
+ args *ResourceProviderValidateResourceArgs,
+ reply *ResourceProviderValidateResourceResponse) error {
+ warns, errs := s.Provider.ValidateDataSource(args.Type, args.Config)
+ berrs := make([]*plugin.BasicError, len(errs))
+ for i, err := range errs {
+ berrs[i] = plugin.NewBasicError(err)
+ }
+ *reply = ResourceProviderValidateResourceResponse{
+ Warnings: warns,
+ Errors: berrs,
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) ReadDataDiff(
+ args *ResourceProviderReadDataDiffArgs,
+ result *ResourceProviderReadDataDiffResponse) error {
+ diff, err := s.Provider.ReadDataDiff(args.Info, args.Config)
+ *result = ResourceProviderReadDataDiffResponse{
+ Diff: diff,
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) ReadDataApply(
+ args *ResourceProviderReadDataApplyArgs,
+ result *ResourceProviderReadDataApplyResponse) error {
+ newState, err := s.Provider.ReadDataApply(args.Info, args.Diff)
+ *result = ResourceProviderReadDataApplyResponse{
+ State: newState,
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *ResourceProviderServer) DataSources(
+ nothing interface{},
+ result *[]terraform.DataSource) error {
+ *result = s.Provider.DataSources()
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go
new file mode 100644
index 00000000..8fce9d8a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go
@@ -0,0 +1,173 @@
+package plugin
+
+import (
+ "net/rpc"
+
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// ResourceProvisionerPlugin is the plugin.Plugin implementation.
+type ResourceProvisionerPlugin struct {
+ F func() terraform.ResourceProvisioner
+}
+
+func (p *ResourceProvisionerPlugin) Server(b *plugin.MuxBroker) (interface{}, error) {
+ return &ResourceProvisionerServer{Broker: b, Provisioner: p.F()}, nil
+}
+
+func (p *ResourceProvisionerPlugin) Client(
+ b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
+ return &ResourceProvisioner{Broker: b, Client: c}, nil
+}
+
+// ResourceProvisioner is an implementation of terraform.ResourceProvisioner
+// that communicates over RPC.
+type ResourceProvisioner struct {
+ Broker *plugin.MuxBroker
+ Client *rpc.Client
+}
+
+func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ var resp ResourceProvisionerValidateResponse
+ args := ResourceProvisionerValidateArgs{
+ Config: c,
+ }
+
+ err := p.Client.Call("Plugin.Validate", &args, &resp)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ var errs []error
+ if len(resp.Errors) > 0 {
+ errs = make([]error, len(resp.Errors))
+ for i, err := range resp.Errors {
+ errs[i] = err
+ }
+ }
+
+ return resp.Warnings, errs
+}
+
+func (p *ResourceProvisioner) Apply(
+ output terraform.UIOutput,
+ s *terraform.InstanceState,
+ c *terraform.ResourceConfig) error {
+ id := p.Broker.NextId()
+ go p.Broker.AcceptAndServe(id, &UIOutputServer{
+ UIOutput: output,
+ })
+
+ var resp ResourceProvisionerApplyResponse
+ args := &ResourceProvisionerApplyArgs{
+ OutputId: id,
+ State: s,
+ Config: c,
+ }
+
+ err := p.Client.Call("Plugin.Apply", args, &resp)
+ if err != nil {
+ return err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return err
+}
+
+func (p *ResourceProvisioner) Stop() error {
+ var resp ResourceProvisionerStopResponse
+ err := p.Client.Call("Plugin.Stop", new(interface{}), &resp)
+ if err != nil {
+ return err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ }
+
+ return err
+}
+
+func (p *ResourceProvisioner) Close() error {
+ return p.Client.Close()
+}
+
+type ResourceProvisionerValidateArgs struct {
+ Config *terraform.ResourceConfig
+}
+
+type ResourceProvisionerValidateResponse struct {
+ Warnings []string
+ Errors []*plugin.BasicError
+}
+
+type ResourceProvisionerApplyArgs struct {
+ OutputId uint32
+ State *terraform.InstanceState
+ Config *terraform.ResourceConfig
+}
+
+type ResourceProvisionerApplyResponse struct {
+ Error *plugin.BasicError
+}
+
+type ResourceProvisionerStopResponse struct {
+ Error *plugin.BasicError
+}
+
+// ResourceProvisionerServer is a net/rpc compatible structure for serving
+// a ResourceProvisioner. This should not be used directly.
+type ResourceProvisionerServer struct {
+ Broker *plugin.MuxBroker
+ Provisioner terraform.ResourceProvisioner
+}
+
+func (s *ResourceProvisionerServer) Apply(
+ args *ResourceProvisionerApplyArgs,
+ result *ResourceProvisionerApplyResponse) error {
+ conn, err := s.Broker.Dial(args.OutputId)
+ if err != nil {
+ *result = ResourceProvisionerApplyResponse{
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+ }
+ client := rpc.NewClient(conn)
+ defer client.Close()
+
+ output := &UIOutput{Client: client}
+
+ err = s.Provisioner.Apply(output, args.State, args.Config)
+ *result = ResourceProvisionerApplyResponse{
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *ResourceProvisionerServer) Validate(
+ args *ResourceProvisionerValidateArgs,
+ reply *ResourceProvisionerValidateResponse) error {
+ warns, errs := s.Provisioner.Validate(args.Config)
+ berrs := make([]*plugin.BasicError, len(errs))
+ for i, err := range errs {
+ berrs[i] = plugin.NewBasicError(err)
+ }
+ *reply = ResourceProvisionerValidateResponse{
+ Warnings: warns,
+ Errors: berrs,
+ }
+ return nil
+}
+
+func (s *ResourceProvisionerServer) Stop(
+ _ interface{},
+ reply *ResourceProvisionerStopResponse) error {
+ err := s.Provisioner.Stop()
+ *reply = ResourceProvisionerStopResponse{
+ Error: plugin.NewBasicError(err),
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/serve.go b/vendor/github.com/hashicorp/terraform/plugin/serve.go
new file mode 100644
index 00000000..2028a613
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/serve.go
@@ -0,0 +1,54 @@
+package plugin
+
+import (
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// The constants below are the names of the plugins that can be dispensed
+// from the plugin server.
+const (
+ ProviderPluginName = "provider"
+ ProvisionerPluginName = "provisioner"
+)
+
+// Handshake is the HandshakeConfig used to configure clients and servers.
+var Handshake = plugin.HandshakeConfig{
+ // The ProtocolVersion is the version that must match between TF core
+ // and TF plugins. This should be bumped whenever a change happens in
+ // one or the other that makes it so that they can't safely communicate.
+ // This could be adding a new interface value, it could be how
+ // helper/schema computes diffs, etc.
+ ProtocolVersion: 4,
+
+ // The magic cookie values should NEVER be changed.
+ MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE",
+ MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2",
+}
+
+type ProviderFunc func() terraform.ResourceProvider
+type ProvisionerFunc func() terraform.ResourceProvisioner
+
+// ServeOpts are the configurations to serve a plugin.
+type ServeOpts struct {
+ ProviderFunc ProviderFunc
+ ProvisionerFunc ProvisionerFunc
+}
+
+// Serve serves a plugin. This function never returns and should be the final
+// function called in the main function of the plugin.
+func Serve(opts *ServeOpts) {
+ plugin.Serve(&plugin.ServeConfig{
+ HandshakeConfig: Handshake,
+ Plugins: pluginMap(opts),
+ })
+}
+
+// pluginMap returns the map[string]plugin.Plugin to use for configuring a plugin
+// server or client.
+func pluginMap(opts *ServeOpts) map[string]plugin.Plugin {
+ return map[string]plugin.Plugin{
+ "provider": &ResourceProviderPlugin{F: opts.ProviderFunc},
+ "provisioner": &ResourceProvisionerPlugin{F: opts.ProvisionerFunc},
+ }
+}
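+
+// A provider binary wires this together from its main function. A minimal
+// sketch; example.Provider is a hypothetical constructor that must return a
+// terraform.ResourceProvider:
+//
+//	func main() {
+//		plugin.Serve(&plugin.ServeOpts{
+//			ProviderFunc: example.Provider,
+//		})
+//	}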
diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
new file mode 100644
index 00000000..493efc0a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
@@ -0,0 +1,51 @@
+package plugin
+
+import (
+ "net/rpc"
+
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// UIInput is an implementation of terraform.UIInput that communicates
+// over RPC.
+type UIInput struct {
+ Client *rpc.Client
+}
+
+func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) {
+ var resp UIInputInputResponse
+ err := i.Client.Call("Plugin.Input", opts, &resp)
+ if err != nil {
+ return "", err
+ }
+ if resp.Error != nil {
+ err = resp.Error
+ return "", err
+ }
+
+ return resp.Value, nil
+}
+
+type UIInputInputResponse struct {
+ Value string
+ Error *plugin.BasicError
+}
+
+// UIInputServer is a net/rpc compatible structure for serving
+// a UIInput. This should not be used directly.
+type UIInputServer struct {
+ UIInput terraform.UIInput
+}
+
+func (s *UIInputServer) Input(
+ opts *terraform.InputOpts,
+ reply *UIInputInputResponse) error {
+ value, err := s.UIInput.Input(opts)
+ *reply = UIInputInputResponse{
+ Value: value,
+ Error: plugin.NewBasicError(err),
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_output.go b/vendor/github.com/hashicorp/terraform/plugin/ui_output.go
new file mode 100644
index 00000000..c222b00c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/ui_output.go
@@ -0,0 +1,29 @@
+package plugin
+
+import (
+ "net/rpc"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// UIOutput is an implementation of terraform.UIOutput that communicates
+// over RPC.
+type UIOutput struct {
+ Client *rpc.Client
+}
+
+func (o *UIOutput) Output(v string) {
+ o.Client.Call("Plugin.Output", v, new(interface{}))
+}
+
+// UIOutputServer is the RPC server for serving UIOutput.
+type UIOutputServer struct {
+ UIOutput terraform.UIOutput
+}
+
+func (s *UIOutputServer) Output(
+ v string,
+ reply *interface{}) error {
+ s.UIOutput.Output(v)
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go
new file mode 100644
index 00000000..306128ed
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context.go
@@ -0,0 +1,1022 @@
+package terraform
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/hcl"
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/helper/experiment"
+)
+
+// InputMode defines what sort of input will be asked for when Input
+// is called on Context.
+type InputMode byte
+
+const (
+ // InputModeVar asks for all variables
+ InputModeVar InputMode = 1 << iota
+
+ // InputModeVarUnset asks for variables which are not set yet.
+ // InputModeVar must be set for this to have an effect.
+ InputModeVarUnset
+
+ // InputModeProvider asks for provider variables
+ InputModeProvider
+
+ // InputModeStd is the standard operating mode and asks for both variables
+ // and providers.
+ InputModeStd = InputModeVar | InputModeProvider
+)
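+
+// InputMode values are bit flags and combine with |. For example, to ask
+// only for variables that are not set yet (ctx is an existing *Context):
+//
+//	if err := ctx.Input(InputModeVar | InputModeVarUnset); err != nil {
+//		log.Fatal(err)
+//	}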
+
+var (
+ // contextFailOnShadowError will cause Context operations to return
+ // errors when shadow operations fail. This is only used for testing.
+ contextFailOnShadowError = false
+
+ // contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every
+ // Plan operation, effectively testing the Diff DeepCopy whenever
+ // a Plan occurs. This is enabled for tests.
+ contextTestDeepCopyOnPlan = false
+)
+
+// ContextOpts are the user-configurable options to create a context with
+// NewContext.
+type ContextOpts struct {
+ Meta *ContextMeta
+ Destroy bool
+ Diff *Diff
+ Hooks []Hook
+ Module *module.Tree
+ Parallelism int
+ State *State
+ StateFutureAllowed bool
+ Providers map[string]ResourceProviderFactory
+ Provisioners map[string]ResourceProvisionerFactory
+ Shadow bool
+ Targets []string
+ Variables map[string]interface{}
+
+ UIInput UIInput
+}
+
+// ContextMeta is metadata about the running context. This is information
+// that this package or structure cannot determine on its own but exposes
+// into Terraform in various ways. This must be provided by the Context
+// initializer.
+type ContextMeta struct {
+ Env string // Env is the state environment
+}
+
+// Context represents all the context that Terraform needs in order to
+// perform operations on infrastructure. This structure is built using
+// NewContext. See the documentation for that.
+//
+// Extra functions on Context can be found in context_*.go files.
+type Context struct {
+ // Maintainer note: Anytime this struct is changed, please verify
+ // that newShadowContext still does the right thing. Tests should
+ // fail regardless but putting this note here as well.
+
+ components contextComponentFactory
+ destroy bool
+ diff *Diff
+ diffLock sync.RWMutex
+ hooks []Hook
+ meta *ContextMeta
+ module *module.Tree
+ sh *stopHook
+ shadow bool
+ state *State
+ stateLock sync.RWMutex
+ targets []string
+ uiInput UIInput
+ variables map[string]interface{}
+
+ l sync.Mutex // Lock acquired during any task
+ parallelSem Semaphore
+ providerInputConfig map[string]map[string]interface{}
+ runLock sync.Mutex
+ runCond *sync.Cond
+ runContext context.Context
+ runContextCancel context.CancelFunc
+ shadowErr error
+}
+
+// NewContext creates a new Context structure.
+//
+// Once a Context is created, the pointer values within ContextOpts
+// should not be mutated in any way, since the pointers are copied, not
+// the values themselves.
+func NewContext(opts *ContextOpts) (*Context, error) {
+ // Validate the version requirement if it is given
+ if opts.Module != nil {
+ if err := checkRequiredVersion(opts.Module); err != nil {
+ return nil, err
+ }
+ }
+
+ // Copy all the hooks and add our stop hook. We don't append directly
+ // to the Config so that we're not modifying that in-place.
+ sh := new(stopHook)
+ hooks := make([]Hook, len(opts.Hooks)+1)
+ copy(hooks, opts.Hooks)
+ hooks[len(opts.Hooks)] = sh
+
+ state := opts.State
+ if state == nil {
+ state = new(State)
+ state.init()
+ }
+
+ // If our state is from the future, then error. Callers can avoid
+ // this error by explicitly setting `StateFutureAllowed`.
+ if !opts.StateFutureAllowed && state.FromFutureTerraform() {
+ return nil, fmt.Errorf(
+ "Terraform doesn't allow running any operations against a state\n"+
+ "that was written by a future Terraform version. The state is\n"+
+ "reporting it is written by Terraform '%s'.\n\n"+
+ "Please run at least that version of Terraform to continue.",
+ state.TFVersion)
+ }
+
+ // Explicitly reset our state version to our current version so that
+ // any operations we do will write out that our latest version
+ // has run.
+ state.TFVersion = Version
+
+ // Determine parallelism, default to 10. We do this both to limit
+ // CPU pressure but also to have an extra guard against rate throttling
+ // from providers.
+ par := opts.Parallelism
+ if par == 0 {
+ par = 10
+ }
+
+ // Set up the variables in the following sequence:
+ // 0 - Take default values from the configuration
+ // 1 - Take values from TF_VAR_x environment variables
+ // 2 - Take values specified in -var flags, overriding values
+ // set by environment variables if necessary. This includes
+ // values taken from -var-file in addition.
+ variables := make(map[string]interface{})
+
+ if opts.Module != nil {
+ var err error
+ variables, err = Variables(opts.Module, opts.Variables)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ diff := opts.Diff
+ if diff == nil {
+ diff = &Diff{}
+ }
+
+ return &Context{
+ components: &basicComponentFactory{
+ providers: opts.Providers,
+ provisioners: opts.Provisioners,
+ },
+ destroy: opts.Destroy,
+ diff: diff,
+ hooks: hooks,
+ meta: opts.Meta,
+ module: opts.Module,
+ shadow: opts.Shadow,
+ state: state,
+ targets: opts.Targets,
+ uiInput: opts.UIInput,
+ variables: variables,
+
+ parallelSem: NewSemaphore(par),
+ providerInputConfig: make(map[string]map[string]interface{}),
+ sh: sh,
+ }, nil
+}
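+
+// A minimal construction sketch; mod and providerFactory are hypothetical
+// placeholders for a loaded *module.Tree and a ResourceProviderFactory:
+//
+//	ctx, err := NewContext(&ContextOpts{
+//		Module: mod,
+//		Providers: map[string]ResourceProviderFactory{
+//			"libvirt": providerFactory,
+//		},
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	plan, err := ctx.Plan()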
+
+type ContextGraphOpts struct {
+ // If true, validates the graph structure (checks for cycles).
+ Validate bool
+
+ // Legacy graphs only: won't prune the graph
+ Verbose bool
+}
+
+// Graph returns the graph used for the given operation type.
+//
+// The most extensive or complex graph type is GraphTypePlan.
+func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
+ if opts == nil {
+ opts = &ContextGraphOpts{Validate: true}
+ }
+
+ log.Printf("[INFO] terraform: building graph: %s", typ)
+ switch typ {
+ case GraphTypeApply:
+ return (&ApplyGraphBuilder{
+ Module: c.module,
+ Diff: c.diff,
+ State: c.state,
+ Providers: c.components.ResourceProviders(),
+ Provisioners: c.components.ResourceProvisioners(),
+ Targets: c.targets,
+ Destroy: c.destroy,
+ Validate: opts.Validate,
+ }).Build(RootModulePath)
+
+ case GraphTypeInput:
+ // The input graph is just a slightly modified plan graph
+ fallthrough
+ case GraphTypeValidate:
+ // The validate graph is just a slightly modified plan graph
+ fallthrough
+ case GraphTypePlan:
+ // Create the plan graph builder
+ p := &PlanGraphBuilder{
+ Module: c.module,
+ State: c.state,
+ Providers: c.components.ResourceProviders(),
+ Targets: c.targets,
+ Validate: opts.Validate,
+ }
+
+ // Some special cases for other graph types shared with plan currently
+ var b GraphBuilder = p
+ switch typ {
+ case GraphTypeInput:
+ b = InputGraphBuilder(p)
+ case GraphTypeValidate:
+ // We need to set the provisioners so those can be validated
+ p.Provisioners = c.components.ResourceProvisioners()
+
+ b = ValidateGraphBuilder(p)
+ }
+
+ return b.Build(RootModulePath)
+
+ case GraphTypePlanDestroy:
+ return (&DestroyPlanGraphBuilder{
+ Module: c.module,
+ State: c.state,
+ Targets: c.targets,
+ Validate: opts.Validate,
+ }).Build(RootModulePath)
+
+ case GraphTypeRefresh:
+ return (&RefreshGraphBuilder{
+ Module: c.module,
+ State: c.state,
+ Providers: c.components.ResourceProviders(),
+ Targets: c.targets,
+ Validate: opts.Validate,
+ }).Build(RootModulePath)
+ }
+
+ return nil, fmt.Errorf("unknown graph type: %s", typ)
+}
+
+// ShadowError returns any errors caught during a shadow operation.
+//
+// A shadow operation is an operation run in parallel to a real operation
+// that performs the same tasks using new logic on copied state. The results
+// are compared to ensure that the new logic works the same as the old logic.
+// The shadow never affects the real operation or return values.
+//
+// The result of the shadow operation are only available through this function
+// call after a real operation is complete.
+//
+// For API consumers of Context, you can safely ignore this function
+// completely if you have no interest in helping report experimental feature
+// errors to Terraform maintainers. Otherwise, please call this function
+// after every operation and report this to the user.
+//
+// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
+// the real state or result of a real operation. They are purely informational
+// to assist in future Terraform versions being more stable. Please message
+// this effectively to the end user.
+//
+// This must be called only when no other operation is running (refresh,
+// plan, etc.). The result can be used in parallel to any other operation
+// running.
+func (c *Context) ShadowError() error {
+ return c.shadowErr
+}
+
+// State returns a copy of the current state associated with this context.
+//
+// This cannot safely be called in parallel with any other Context function.
+func (c *Context) State() *State {
+ return c.state.DeepCopy()
+}
+
+// Interpolater returns an Interpolater built on a copy of the state
+// that can be used to test interpolation values.
+func (c *Context) Interpolater() *Interpolater {
+ var varLock sync.Mutex
+ var stateLock sync.RWMutex
+ return &Interpolater{
+ Operation: walkApply,
+ Meta: c.meta,
+ Module: c.module,
+ State: c.state.DeepCopy(),
+ StateLock: &stateLock,
+ VariableValues: c.variables,
+ VariableValuesLock: &varLock,
+ }
+}
+
+// Input asks for input to fill variables and provider configurations.
+// This modifies the configuration in-place, so asking for Input twice
+// may result in different UI output showing different current values.
+func (c *Context) Input(mode InputMode) error {
+ defer c.acquireRun("input")()
+
+ if mode&InputModeVar != 0 {
+ // Walk the variables first for the root module. We walk them in
+ // alphabetical order for UX reasons.
+ rootConf := c.module.Config()
+ names := make([]string, len(rootConf.Variables))
+ m := make(map[string]*config.Variable)
+ for i, v := range rootConf.Variables {
+ names[i] = v.Name
+ m[v.Name] = v
+ }
+ sort.Strings(names)
+ for _, n := range names {
+ // If we only care about unset variables, then if the variable
+ // is set, continue on.
+ if mode&InputModeVarUnset != 0 {
+ if _, ok := c.variables[n]; ok {
+ continue
+ }
+ }
+
+ var valueType config.VariableType
+
+ v := m[n]
+ switch valueType = v.Type(); valueType {
+ case config.VariableTypeUnknown:
+ continue
+ case config.VariableTypeMap:
+ // OK
+ case config.VariableTypeList:
+ // OK
+ case config.VariableTypeString:
+ // OK
+ default:
+ panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
+ }
+
+ // If the variable is not already set, and the variable defines a
+ // default, use that for the value.
+ if _, ok := c.variables[n]; !ok {
+ if v.Default != nil {
+ c.variables[n] = v.Default.(string)
+ continue
+ }
+ }
+
+ // this should only happen during tests
+ if c.uiInput == nil {
+				log.Println("[WARN] Context.uiInput is nil")
+ continue
+ }
+
+ // Ask the user for a value for this variable
+ var value string
+ retry := 0
+ for {
+ var err error
+ value, err = c.uiInput.Input(&InputOpts{
+ Id: fmt.Sprintf("var.%s", n),
+ Query: fmt.Sprintf("var.%s", n),
+ Description: v.Description,
+ })
+ if err != nil {
+ return fmt.Errorf(
+ "Error asking for %s: %s", n, err)
+ }
+
+ if value == "" && v.Required() {
+ // Redo if it is required, but abort if we keep getting
+ // blank entries
+ if retry > 2 {
+ return fmt.Errorf("missing required value for %q", n)
+ }
+ retry++
+ continue
+ }
+
+ break
+ }
+
+ // no value provided, so don't set the variable at all
+ if value == "" {
+ continue
+ }
+
+ decoded, err := parseVariableAsHCL(n, value, valueType)
+ if err != nil {
+ return err
+ }
+
+ if decoded != nil {
+ c.variables[n] = decoded
+ }
+ }
+ }
+
+ if mode&InputModeProvider != 0 {
+ // Build the graph
+ graph, err := c.Graph(GraphTypeInput, nil)
+ if err != nil {
+ return err
+ }
+
+ // Do the walk
+ if _, err := c.walk(graph, nil, walkInput); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Apply applies the changes represented by this context and returns
+// the resulting state.
+//
+// Even in the case an error is returned, the state may be returned and will
+// potentially be partially updated. In addition to returning the resulting
+// state, this context is updated with the latest state.
+//
+// If the state is required after an error, the caller should call
+// Context.State, rather than rely on the return value.
+//
+// TODO: Apply and Refresh should either always return a state, or rely on the
+// State() method. Currently the helper/resource testing framework relies
+// on the absence of a returned state to determine if Destroy can be
+// called, so that will need to be refactored before this can be changed.
+func (c *Context) Apply() (*State, error) {
+ defer c.acquireRun("apply")()
+
+ // Copy our own state
+ c.state = c.state.DeepCopy()
+
+ // Build the graph.
+ graph, err := c.Graph(GraphTypeApply, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Determine the operation
+ operation := walkApply
+ if c.destroy {
+ operation = walkDestroy
+ }
+
+ // Walk the graph
+ walker, err := c.walk(graph, graph, operation)
+ if len(walker.ValidationErrors) > 0 {
+ err = multierror.Append(err, walker.ValidationErrors...)
+ }
+
+ // Clean out any unused things
+ c.state.prune()
+
+ return c.state, err
+}
+
+// Plan generates an execution plan for the given context.
+//
+// The execution plan encapsulates the context and can be stored
+// in order to reinstantiate a context later for Apply.
+//
+// Plan also updates the diff of this context to be the diff generated
+// by the plan, so Apply can be called after.
+func (c *Context) Plan() (*Plan, error) {
+ defer c.acquireRun("plan")()
+
+ p := &Plan{
+ Module: c.module,
+ Vars: c.variables,
+ State: c.state,
+ Targets: c.targets,
+ }
+
+ var operation walkOperation
+ if c.destroy {
+ operation = walkPlanDestroy
+ } else {
+ // Set our state to be something temporary. We do this so that
+ // the plan can update a fake state so that variables work, then
+ // we replace it back with our old state.
+ old := c.state
+ if old == nil {
+ c.state = &State{}
+ c.state.init()
+ } else {
+ c.state = old.DeepCopy()
+ }
+ defer func() {
+ c.state = old
+ }()
+
+ operation = walkPlan
+ }
+
+	// Set up our diff
+ c.diffLock.Lock()
+ c.diff = new(Diff)
+ c.diff.init()
+ c.diffLock.Unlock()
+
+ // Build the graph.
+ graphType := GraphTypePlan
+ if c.destroy {
+ graphType = GraphTypePlanDestroy
+ }
+ graph, err := c.Graph(graphType, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Do the walk
+ walker, err := c.walk(graph, graph, operation)
+ if err != nil {
+ return nil, err
+ }
+ p.Diff = c.diff
+
+ // If this is true, it means we're running unit tests. In this case,
+ // we perform a deep copy just to ensure that all context tests also
+ // test that a diff is copy-able. This will panic if it fails. This
+ // is enabled during unit tests.
+ //
+ // This should never be true during production usage, but even if it is,
+ // it can't do any real harm.
+ if contextTestDeepCopyOnPlan {
+ p.Diff.DeepCopy()
+ }
+
+ /*
+ // We don't do the reverification during the new destroy plan because
+ // it will use a different apply process.
+ if X_legacyGraph {
+ // Now that we have a diff, we can build the exact graph that Apply will use
+ // and catch any possible cycles during the Plan phase.
+ if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
+ return nil, err
+ }
+ }
+ */
+
+ var errs error
+ if len(walker.ValidationErrors) > 0 {
+ errs = multierror.Append(errs, walker.ValidationErrors...)
+ }
+ return p, errs
+}
+
+// Refresh goes through all the resources in the state and refreshes them
+// to their latest state. This will update the state that this context
+// works with, along with returning it.
+//
+// Even in the case an error is returned, the state may be returned and
+// will potentially be partially updated.
+func (c *Context) Refresh() (*State, error) {
+ defer c.acquireRun("refresh")()
+
+ // Copy our own state
+ c.state = c.state.DeepCopy()
+
+ // Build the graph.
+ graph, err := c.Graph(GraphTypeRefresh, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Do the walk
+ if _, err := c.walk(graph, graph, walkRefresh); err != nil {
+ return nil, err
+ }
+
+ // Clean out any unused things
+ c.state.prune()
+
+ return c.state, nil
+}
+
+// Stop stops the running task.
+//
+// Stop will block until the task completes.
+func (c *Context) Stop() {
+ log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence")
+
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ // If we're running, then stop
+ if c.runContextCancel != nil {
+ log.Printf("[WARN] terraform: run context exists, stopping")
+
+ // Tell the hook we want to stop
+ c.sh.Stop()
+
+ // Stop the context
+ c.runContextCancel()
+ c.runContextCancel = nil
+ }
+
+ // Grab the condition var before we exit
+ if cond := c.runCond; cond != nil {
+ cond.Wait()
+ }
+
+ log.Printf("[WARN] terraform: stop complete")
+}
+
+// Validate validates the configuration and returns any warnings or errors.
+func (c *Context) Validate() ([]string, []error) {
+ defer c.acquireRun("validate")()
+
+ var errs error
+
+ // Validate the configuration itself
+ if err := c.module.Validate(); err != nil {
+ errs = multierror.Append(errs, err)
+ }
+
+ // This only needs to be done for the root module, since inter-module
+ // variables are validated in the module tree.
+ if config := c.module.Config(); config != nil {
+ // Validate the user variables
+ if err := smcUserVariables(config, c.variables); len(err) > 0 {
+ errs = multierror.Append(errs, err...)
+ }
+ }
+
+ // If we have errors at this point, the graphing has no chance,
+ // so just bail early.
+ if errs != nil {
+ return nil, []error{errs}
+ }
+
+ // Build the graph so we can walk it and run Validate on nodes.
+ // We also validate the graph generated here, but this graph doesn't
+ // necessarily match the graph that Plan will generate, so we'll validate the
+ // graph again later after Planning.
+ graph, err := c.Graph(GraphTypeValidate, nil)
+ if err != nil {
+ return nil, []error{err}
+ }
+
+ // Walk
+ walker, err := c.walk(graph, graph, walkValidate)
+ if err != nil {
+ return nil, multierror.Append(errs, err).Errors
+ }
+
+ // Return the result
+ rerrs := multierror.Append(errs, walker.ValidationErrors...)
+
+ sort.Strings(walker.ValidationWarnings)
+ sort.Slice(rerrs.Errors, func(i, j int) bool {
+ return rerrs.Errors[i].Error() < rerrs.Errors[j].Error()
+ })
+
+ return walker.ValidationWarnings, rerrs.Errors
+}
+
+// Module returns the module tree associated with this context.
+func (c *Context) Module() *module.Tree {
+ return c.module
+}
+
+// Variables will return the mapping of variables that were defined
+// for this Context. If Input was called, this mapping may be different
+// than what was given.
+func (c *Context) Variables() map[string]interface{} {
+ return c.variables
+}
+
+// SetVariable sets a variable after a context has already been built.
+func (c *Context) SetVariable(k string, v interface{}) {
+ c.variables[k] = v
+}
+
+func (c *Context) acquireRun(phase string) func() {
+ // With the run lock held, grab the context lock to make changes
+ // to the run context.
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ // Wait until we're no longer running
+ for c.runCond != nil {
+ c.runCond.Wait()
+ }
+
+	// Build the condition variable that marks a run as in progress
+ c.runCond = sync.NewCond(&c.l)
+
+	// Set up debugging
+ dbug.SetPhase(phase)
+
+ // Create a new run context
+ c.runContext, c.runContextCancel = context.WithCancel(context.Background())
+
+ // Reset the stop hook so we're not stopped
+ c.sh.Reset()
+
+ // Reset the shadow errors
+ c.shadowErr = nil
+
+ return c.releaseRun
+}
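+
+// Callers guard an entire operation with a single statement by deferring the
+// release function that acquireRun returns, as the methods above do:
+//
+//	defer c.acquireRun("plan")()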
+
+func (c *Context) releaseRun() {
+ // Grab the context lock so that we can make modifications to fields
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ // setting the phase to "INVALID" lets us easily detect if we have
+ // operations happening outside of a run, or we missed setting the proper
+ // phase
+ dbug.SetPhase("INVALID")
+
+	// End our run. We check if runContextCancel is non-nil because it
+	// is set to nil if the run was cancelled via Stop()
+ if c.runContextCancel != nil {
+ c.runContextCancel()
+ }
+
+	// Wake everyone waiting on our condition variable
+ cond := c.runCond
+ c.runCond = nil
+ cond.Broadcast()
+
+ // Unset the context
+ c.runContext = nil
+}
+
+func (c *Context) walk(
+ graph, shadow *Graph, operation walkOperation) (*ContextGraphWalker, error) {
+ // Keep track of the "real" context which is the context that does
+ // the real work: talking to real providers, modifying real state, etc.
+ realCtx := c
+
+ // If we don't want shadowing, remove it
+ if !experiment.Enabled(experiment.X_shadow) {
+ shadow = nil
+ }
+
+ // Just log this so we can see it in a debug log
+ if !c.shadow {
+ log.Printf("[WARN] terraform: shadow graph disabled")
+ shadow = nil
+ }
+
+ // If we have a shadow graph, walk that as well
+ var shadowCtx *Context
+ var shadowCloser Shadow
+ if shadow != nil {
+ // Build the shadow context. In the process, override the real context
+ // with the one that is wrapped so that the shadow context can verify
+ // the results of the real.
+ realCtx, shadowCtx, shadowCloser = newShadowContext(c)
+ }
+
+ log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
+
+ walker := &ContextGraphWalker{
+ Context: realCtx,
+ Operation: operation,
+ StopContext: c.runContext,
+ }
+
+ // Watch for a stop so we can call the provider Stop() API.
+ watchStop, watchWait := c.watchStop(walker)
+
+ // Walk the real graph, this will block until it completes
+ realErr := graph.Walk(walker)
+
+ // Close the channel so the watcher stops, and wait for it to return.
+ close(watchStop)
+ <-watchWait
+
+ // If we have a shadow graph and we interrupted the real graph, then
+ // we just close the shadow and never verify it. It is non-trivial to
+ // recreate the exact execution state up until an interruption so this
+ // isn't supported with shadows at the moment.
+ if shadowCloser != nil && c.sh.Stopped() {
+ // Ignore the error result, there is nothing we could care about
+ shadowCloser.CloseShadow()
+
+ // Set it to nil so we don't do anything
+ shadowCloser = nil
+ }
+
+ // If we have a shadow graph, wait for that to complete.
+ if shadowCloser != nil {
+ // Build the graph walker for the shadow. We also wrap this in
+ // a panicwrap so that panics are captured. For the shadow graph,
+ // we just want panics to be normal errors rather than to crash
+ // Terraform.
+ shadowWalker := GraphWalkerPanicwrap(&ContextGraphWalker{
+ Context: shadowCtx,
+ Operation: operation,
+ })
+
+ // Kick off the shadow walk. This will block on any operations
+ // on the real walk so it is fine to start first.
+ log.Printf("[INFO] Starting shadow graph walk: %s", operation.String())
+ shadowCh := make(chan error)
+ go func() {
+ shadowCh <- shadow.Walk(shadowWalker)
+ }()
+
+ // Notify the shadow that we're done
+ if err := shadowCloser.CloseShadow(); err != nil {
+ c.shadowErr = multierror.Append(c.shadowErr, err)
+ }
+
+ // Wait for the walk to end
+ log.Printf("[DEBUG] Waiting for shadow graph to complete...")
+ shadowWalkErr := <-shadowCh
+
+ // Get any shadow errors
+ if err := shadowCloser.ShadowError(); err != nil {
+ c.shadowErr = multierror.Append(c.shadowErr, err)
+ }
+
+ // Verify the contexts (compare)
+ if err := shadowContextVerify(realCtx, shadowCtx); err != nil {
+ c.shadowErr = multierror.Append(c.shadowErr, err)
+ }
+
+ // At this point, if we're supposed to fail on error, then
+ // we PANIC. Some tests just verify that there is an error,
+ // so simply appending it to realErr and returning could hide
+ // shadow problems.
+ //
+ // This must be done BEFORE appending shadowWalkErr since the
+ // shadowWalkErr may include expected errors.
+ //
+ // We only do this if we don't have a real error. In the case of
+ // a real error, we can't guarantee what nodes were and weren't
+ // traversed in parallel scenarios so we can't guarantee no
+ // shadow errors.
+ if c.shadowErr != nil && contextFailOnShadowError && realErr == nil {
+ panic(multierror.Prefix(c.shadowErr, "shadow graph:"))
+ }
+
+ // Now, if we have a walk error, we append that through
+ if shadowWalkErr != nil {
+ c.shadowErr = multierror.Append(c.shadowErr, shadowWalkErr)
+ }
+
+ if c.shadowErr == nil {
+ log.Printf("[INFO] Shadow graph success!")
+ } else {
+ log.Printf("[ERROR] Shadow graph error: %s", c.shadowErr)
+
+ // If we're supposed to fail on shadow errors, then report it
+ if contextFailOnShadowError {
+ realErr = multierror.Append(realErr, multierror.Prefix(
+ c.shadowErr, "shadow graph:"))
+ }
+ }
+ }
+
+ return walker, realErr
+}
+
+// watchStop immediately returns a `stop` and a `wait` chan after dispatching
+// the watchStop goroutine. This will watch the runContext for cancellation and
+// stop the providers accordingly. When the watch is no longer needed, the
+// `stop` chan should be closed before waiting on the `wait` chan.
+// The `wait` chan is important, because without synchronizing with the end of
+// the watchStop goroutine, the runContext may also be closed during the select,
+// incorrectly causing providers to be stopped. Even if the graph walk is done
+// at that point, stopping a provider permanently cancels its StopContext which
+// can cause later actions to fail.
+func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) {
+ stop := make(chan struct{})
+ wait := make(chan struct{})
+
+ // get the runContext cancellation channel now, because releaseRun will
+ // write to the runContext field.
+ done := c.runContext.Done()
+
+ go func() {
+ defer close(wait)
+ // Wait for a stop or completion
+ select {
+ case <-done:
+ // done means the context was canceled, so we need to try and stop
+ // providers.
+ case <-stop:
+ // our own stop channel was closed.
+ return
+ }
+
+ // If we're here, we're stopped, trigger the call.
+
+ {
+ // Copy the providers so that a misbehaved blocking Stop doesn't
+ // completely hang Terraform.
+ walker.providerLock.Lock()
+ ps := make([]ResourceProvider, 0, len(walker.providerCache))
+ for _, p := range walker.providerCache {
+ ps = append(ps, p)
+ }
+ defer walker.providerLock.Unlock()
+
+ for _, p := range ps {
+ // We ignore the error for now since there isn't any reasonable
+ // action to take if there is an error here, since the stop is still
+ // advisory: Terraform will exit once the graph node completes.
+ p.Stop()
+ }
+ }
+
+ {
+ // Call stop on all the provisioners
+ walker.provisionerLock.Lock()
+ ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache))
+ for _, p := range walker.provisionerCache {
+ ps = append(ps, p)
+ }
+ defer walker.provisionerLock.Unlock()
+
+ for _, p := range ps {
+ // We ignore the error for now since there isn't any reasonable
+ // action to take if there is an error here, since the stop is still
+ // advisory: Terraform will exit once the graph node completes.
+ p.Stop()
+ }
+ }
+ }()
+
+ return stop, wait
+}
+
+// parseVariableAsHCL parses the value of a single variable as would have been specified
+// on the command line via -var or in an environment variable named TF_VAR_x, where x is
+// the name of the variable. In order to get around the restriction of HCL requiring a
+// top level object, we prepend a sentinel key, decode the user-specified value as its
+// value and pull the value back out of the resulting map.
+func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) {
+ // expecting a string so don't decode anything, just strip quotes
+ if targetType == config.VariableTypeString {
+ return strings.Trim(input, `"`), nil
+ }
+
+ // return empty types
+ if strings.TrimSpace(input) == "" {
+ switch targetType {
+ case config.VariableTypeList:
+ return []interface{}{}, nil
+ case config.VariableTypeMap:
+ return make(map[string]interface{}), nil
+ }
+ }
+
+ const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"
+	inputWithSentinel := fmt.Sprintf("%s = %s", sentinelValue, input)
+
+	var decoded map[string]interface{}
+	err := hcl.Decode(&decoded, inputWithSentinel)
+ if err != nil {
+ return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", name, input, err)
+ }
+
+ if len(decoded) != 1 {
+ return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. Only one value may be specified.", name, input)
+ }
+
+ parsedValue, ok := decoded[sentinelValue]
+ if !ok {
+ return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
+ }
+
+ switch targetType {
+ case config.VariableTypeList:
+ return parsedValue, nil
+ case config.VariableTypeMap:
+ if list, ok := parsedValue.([]map[string]interface{}); ok {
+ return list[0], nil
+ }
+
+ return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
+ default:
+ panic(fmt.Errorf("unknown type %s", targetType.Printable()))
+ }
+}
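+
+// For example, a map-typed variable supplied as -var 'tags={env = "dev"}' is
+// decoded from the wrapped form
+//
+//	SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY = {env = "dev"}
+//
+// and is returned as map[string]interface{}{"env": "dev"}, after unwrapping
+// the single-element list that HCL produces for object values.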
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_components.go b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
new file mode 100644
index 00000000..6f507445
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
@@ -0,0 +1,65 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// contextComponentFactory is the interface that Context uses
+// to initialize various components such as providers and provisioners.
+// This factory gets more information than the raw maps used to initialize
+// a Context. This information is used for debugging.
+type contextComponentFactory interface {
+ // ResourceProvider creates a new ResourceProvider with the given
+ // type. The "uid" is a unique identifier for this provider being
+ // initialized that can be used for internal tracking.
+ ResourceProvider(typ, uid string) (ResourceProvider, error)
+ ResourceProviders() []string
+
+ // ResourceProvisioner creates a new ResourceProvisioner with the
+ // given type. The "uid" is a unique identifier for this provisioner
+ // being initialized that can be used for internal tracking.
+ ResourceProvisioner(typ, uid string) (ResourceProvisioner, error)
+ ResourceProvisioners() []string
+}
+
+// basicComponentFactory just calls a factory from a map directly.
+type basicComponentFactory struct {
+ providers map[string]ResourceProviderFactory
+ provisioners map[string]ResourceProvisionerFactory
+}
+
+func (c *basicComponentFactory) ResourceProviders() []string {
+	// Preallocate capacity only; make([]string, n) followed by append
+	// would leave n empty strings at the front of the result.
+	result := make([]string, 0, len(c.providers))
+	for k := range c.providers {
+		result = append(result, k)
+	}
+
+	return result
+}
+
+func (c *basicComponentFactory) ResourceProvisioners() []string {
+	// Same pattern as ResourceProviders: preallocate capacity, not length.
+	result := make([]string, 0, len(c.provisioners))
+	for k := range c.provisioners {
+		result = append(result, k)
+	}
+
+	return result
+}
+
+func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvider, error) {
+ f, ok := c.providers[typ]
+ if !ok {
+ return nil, fmt.Errorf("unknown provider %q", typ)
+ }
+
+ return f()
+}
+
+func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) {
+ f, ok := c.provisioners[typ]
+ if !ok {
+ return nil, fmt.Errorf("unknown provisioner %q", typ)
+ }
+
+ return f()
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
new file mode 100644
index 00000000..084f0105
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
@@ -0,0 +1,32 @@
+package terraform
+
+//go:generate stringer -type=GraphType context_graph_type.go
+
+// GraphType is an enum of the type of graph to create with a Context.
+// The values of the constants may change so they shouldn't be depended on;
+// always use the constant name.
+type GraphType byte
+
+const (
+ GraphTypeInvalid GraphType = 0
+ GraphTypeLegacy GraphType = iota
+ GraphTypeRefresh
+ GraphTypePlan
+ GraphTypePlanDestroy
+ GraphTypeApply
+ GraphTypeInput
+ GraphTypeValidate
+)
+
+// GraphTypeMap is a mapping of human-readable string to GraphType. This
+// is useful to use as the mechanism for human input for configurable
+// graph types.
+var GraphTypeMap = map[string]GraphType{
+ "apply": GraphTypeApply,
+ "input": GraphTypeInput,
+ "plan": GraphTypePlan,
+ "plan-destroy": GraphTypePlanDestroy,
+ "refresh": GraphTypeRefresh,
+ "legacy": GraphTypeLegacy,
+ "validate": GraphTypeValidate,
+}
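+
+// For example, translating a user-supplied graph type name (ctx is an
+// existing *Context):
+//
+//	t, ok := GraphTypeMap["plan"] // GraphTypePlan, true
+//	if !ok {
+//		return fmt.Errorf("unknown graph type name")
+//	}
+//	graph, err := ctx.Graph(t, nil)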
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
new file mode 100644
index 00000000..f1d57760
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
@@ -0,0 +1,77 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// ImportOpts are used as the configuration for Import.
+type ImportOpts struct {
+ // Targets are the targets to import
+ Targets []*ImportTarget
+
+ // Module is optional, and specifies a config module that is loaded
+ // into the graph and evaluated. The use case for this is to provide
+ // provider configuration.
+ Module *module.Tree
+}
+
+// ImportTarget is a single resource to import.
+type ImportTarget struct {
+ // Addr is the full resource address of the resource to import.
+ // Example: "module.foo.aws_instance.bar"
+ Addr string
+
+ // ID is the ID of the resource to import. This is resource-specific.
+ ID string
+
+ // Provider string
+ Provider string
+}
+
+// Import takes already-created external resources and brings them
+// under Terraform management. Import requires the exact type, name, and ID
+// of the resources to import.
+//
+// This operation is idempotent. If the requested resource is already
+// imported, no changes are made to the state.
+//
+// Further, this operation also gracefully handles partial state. If during
+// an import there is a failure, all previously imported resources remain
+// imported.
+func (c *Context) Import(opts *ImportOpts) (*State, error) {
+ // Hold a lock since we can modify our own state here
+ defer c.acquireRun("import")()
+
+ // Copy our own state
+ c.state = c.state.DeepCopy()
+
+ // If no module is given, default to the module configured with
+ // the Context.
+ module := opts.Module
+ if module == nil {
+ module = c.module
+ }
+
+ // Initialize our graph builder
+ builder := &ImportGraphBuilder{
+ ImportTargets: opts.Targets,
+ Module: module,
+ Providers: c.components.ResourceProviders(),
+ }
+
+ // Build the graph!
+ graph, err := builder.Build(RootModulePath)
+ if err != nil {
+ return c.state, err
+ }
+
+ // Walk it
+ if _, err := c.walk(graph, nil, walkImport); err != nil {
+ return c.state, err
+ }
+
+ // Clean the state
+ c.state.prune()
+
+ return c.state, nil
+}
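+
+// A minimal import sketch; the resource address and ID are hypothetical:
+//
+//	state, err := ctx.Import(&ImportOpts{
+//		Targets: []*ImportTarget{
+//			{Addr: "libvirt_domain.example", ID: "example-domain"},
+//		},
+//	})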
diff --git a/vendor/github.com/hashicorp/terraform/terraform/debug.go b/vendor/github.com/hashicorp/terraform/terraform/debug.go
new file mode 100644
index 00000000..265339f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/debug.go
@@ -0,0 +1,523 @@
+package terraform
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+)
+
+// dbug is the global handler for writing the debug archive. All methods
+// are safe to call concurrently. Setting dbug to nil will disable writing
+// the debug archive. All methods are safe to call on the nil value.
+var dbug *debugInfo
+
+// SetDebugInfo initializes the debug handler with a backing file in the
+// provided directory. This must be called before any other terraform package
+// operations or not at all. Once this is called, CloseDebugInfo should be
+// called before program exit.
+func SetDebugInfo(path string) error {
+ if os.Getenv("TF_DEBUG") == "" {
+ return nil
+ }
+
+ di, err := newDebugInfoFile(path)
+ if err != nil {
+ return err
+ }
+
+ dbug = di
+ return nil
+}
+
+// CloseDebugInfo is the exported interface to Close the debug info handler.
+// The debug handler needs to be closed before program exit, so we export this
+// function to be deferred in the appropriate entrypoint for our executable.
+func CloseDebugInfo() error {
+ return dbug.Close()
+}
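+
+// A wiring sketch for an entrypoint (the directory is illustrative, and
+// SetDebugInfo is a no-op unless TF_DEBUG is set in the environment):
+//
+//    if err := terraform.SetDebugInfo("./debug"); err != nil {
+//        log.Fatal(err)
+//    }
+//    defer terraform.CloseDebugInfo()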
+
+// newDebugInfoFile creates a debugInfo handler with a backing file in
+// the provided directory.
+func newDebugInfoFile(dir string) (*debugInfo, error) {
+ err := os.MkdirAll(dir, 0755)
+ if err != nil {
+ return nil, err
+ }
+
+ // FIXME: not guaranteed unique, but good enough for now
+ name := fmt.Sprintf("debug-%s", time.Now().Format("2006-01-02-15-04-05.999999999"))
+ archivePath := filepath.Join(dir, name+".tar.gz")
+
+ f, err := os.OpenFile(archivePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+ if err != nil {
+ return nil, err
+ }
+ return newDebugInfo(name, f)
+}
+
+// newDebugInfo creates a debugInfo handler that writes a tar.gz archive to w.
+func newDebugInfo(name string, w io.Writer) (*debugInfo, error) {
+ gz := gzip.NewWriter(w)
+
+ d := &debugInfo{
+ name: name,
+ w: w,
+ gz: gz,
+ tar: tar.NewWriter(gz),
+ }
+
+ // create the subdirs we need
+ topHdr := &tar.Header{
+ Name: name,
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ }
+ graphsHdr := &tar.Header{
+ Name: name + "/graphs",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ }
+ err := d.tar.WriteHeader(topHdr)
+ // if the first write errors, the second will too
+ err = d.tar.WriteHeader(graphsHdr)
+ if err != nil {
+ return nil, err
+ }
+
+ return d, nil
+}
+
+// debugInfo provides various methods for writing debug information to a
+// central archive. The debugInfo struct should be initialized once before any
+// output is written, and Close should be called before program exit. All
+// exported methods on debugInfo will be safe for concurrent use. The exported
+// methods are also all safe to call on a nil pointer, so that there is no need
+// for conditional blocks before writing debug information.
+//
+// Each write operation done by the debugInfo will flush the gzip.Writer and
+// tar.Writer, and call Sync() or Flush() on the output writer as needed. This
+// ensures that as much data as possible is written to storage in the event of
+// a crash. The append format of the tar file and the stream format of the
+// gzip writer allow easy recovery of the data in the event that the debugInfo
+// is not closed before program exit.
+type debugInfo struct {
+ sync.Mutex
+
+ // archive root directory name
+ name string
+
+ // current operation phase
+ phase string
+
+ // step is a monotonic counter for recording the order of operations
+ step int
+
+ // flag to protect Close()
+ closed bool
+
+ // the debug log output is in a tar.gz format, written to the io.Writer w
+ w io.Writer
+ gz *gzip.Writer
+ tar *tar.Writer
+}
+
+// SetPhase sets the name of the current operational phase in the debug
+// handler. Each file in the archive will contain the name of the phase in
+// which it was created, i.e. "input", "apply", "plan", "refresh", "validate".
+func (d *debugInfo) SetPhase(phase string) {
+ if d == nil {
+ return
+ }
+ d.Lock()
+ defer d.Unlock()
+
+ d.phase = phase
+}
+
+// Close finalizes the data in storage. This closes the tar.Writer, the
+// gzip.Writer, and if the output writer is an io.Closer, it is
+// also closed.
+func (d *debugInfo) Close() error {
+ if d == nil {
+ return nil
+ }
+
+ d.Lock()
+ defer d.Unlock()
+
+ if d.closed {
+ return nil
+ }
+ d.closed = true
+
+ d.tar.Close()
+ d.gz.Close()
+
+ if c, ok := d.w.(io.Closer); ok {
+ return c.Close()
+ }
+ return nil
+}
+
+// debugBuffer is an io.WriteCloser that writes itself to the debug
+// archive when closed.
+type debugBuffer struct {
+ debugInfo *debugInfo
+ name string
+ buf bytes.Buffer
+}
+
+func (b *debugBuffer) Write(d []byte) (int, error) {
+ return b.buf.Write(d)
+}
+
+func (b *debugBuffer) Close() error {
+ return b.debugInfo.WriteFile(b.name, b.buf.Bytes())
+}
+
+// ioutil only provides a no-op ReadCloser, so we define a no-op WriteCloser here.
+type nopWriteCloser struct{}
+
+func (nopWriteCloser) Write(p []byte) (int, error) { return len(p), nil }
+func (nopWriteCloser) Close() error { return nil }
+
+// NewFileWriter returns an io.WriteCloser whose writes are buffered and
+// written to the debug archive when closed.
+func (d *debugInfo) NewFileWriter(name string) io.WriteCloser {
+ if d == nil {
+ return nopWriteCloser{}
+ }
+
+ return &debugBuffer{
+ debugInfo: d,
+ name: name,
+ }
+}
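+
+// A usage sketch (the file name and data are illustrative; error handling
+// is elided):
+//
+//    w := dbug.NewFileWriter("graph.dot")
+//    w.Write(data) // buffered in memory
+//    w.Close()     // flushes the buffer into the archive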
+
+type syncer interface {
+ Sync() error
+}
+
+type flusher interface {
+ Flush() error
+}
+
+// Flush the tar.Writer and the gzip.Writer. Flush() or Sync() will be called
+// on the output writer if they are available.
+func (d *debugInfo) flush() {
+ d.tar.Flush()
+ d.gz.Flush()
+
+ if f, ok := d.w.(flusher); ok {
+ f.Flush()
+ }
+
+ if s, ok := d.w.(syncer); ok {
+ s.Sync()
+ }
+}
+
+// WriteFile writes data as a single file to the debug archive.
+func (d *debugInfo) WriteFile(name string, data []byte) error {
+ if d == nil {
+ return nil
+ }
+
+ d.Lock()
+ defer d.Unlock()
+ return d.writeFile(name, data)
+}
+
+func (d *debugInfo) writeFile(name string, data []byte) error {
+ defer d.flush()
+ path := fmt.Sprintf("%s/%d-%s-%s", d.name, d.step, d.phase, name)
+ d.step++
+
+ hdr := &tar.Header{
+ Name: path,
+ Mode: 0644,
+ Size: int64(len(data)),
+ }
+ err := d.tar.WriteHeader(hdr)
+ if err != nil {
+ return err
+ }
+
+ _, err = d.tar.Write(data)
+ return err
+}
+
+// DebugHook implements all methods of the terraform.Hook interface, and writes
+// the arguments to a file in the archive. When a suitable string format for an
+// argument isn't available, the argument is encoded as JSON. If the
+// debug handler is nil, all DebugHook methods are no-ops, so no time is spent
+// marshaling the data structures.
+type DebugHook struct{}
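+
+// A wiring sketch (this assumes the caller constructs a Context from options
+// that accept a []Hook slice):
+//
+//    hooks := []Hook{&DebugHook{}}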
+
+func (*DebugHook) PreApply(ii *InstanceInfo, is *InstanceState, id *InstanceDiff) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String() + "\n")
+ }
+
+ idCopy, err := id.Copy()
+ if err != nil {
+ return HookActionContinue, err
+ }
+ js, err := json.MarshalIndent(idCopy, "", " ")
+ if err != nil {
+ return HookActionContinue, err
+ }
+ buf.Write(js)
+
+ dbug.WriteFile("hook-PreApply", buf.Bytes())
+
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostApply(ii *InstanceInfo, is *InstanceState, err error) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String() + "\n")
+ }
+
+ if err != nil {
+ buf.WriteString(err.Error())
+ }
+
+ dbug.WriteFile("hook-PostApply", buf.Bytes())
+
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PreDiff(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String())
+ buf.WriteString("\n")
+ }
+ dbug.WriteFile("hook-PreDiff", buf.Bytes())
+
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostDiff(ii *InstanceInfo, id *InstanceDiff) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ idCopy, err := id.Copy()
+ if err != nil {
+ return HookActionContinue, err
+ }
+ js, err := json.MarshalIndent(idCopy, "", " ")
+ if err != nil {
+ return HookActionContinue, err
+ }
+ buf.Write(js)
+
+ dbug.WriteFile("hook-PostDiff", buf.Bytes())
+
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PreProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String())
+ buf.WriteString("\n")
+ }
+ dbug.WriteFile("hook-PreProvisionResource", buf.Bytes())
+
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId())
+ buf.WriteString("\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String())
+ buf.WriteString("\n")
+ }
+ dbug.WriteFile("hook-PostProvisionResource", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PreProvision(ii *InstanceInfo, s string) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId())
+ buf.WriteString("\n")
+ }
+ buf.WriteString(s + "\n")
+
+ dbug.WriteFile("hook-PreProvision", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostProvision(ii *InstanceInfo, s string, err error) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+ buf.WriteString(s + "\n")
+
+ dbug.WriteFile("hook-PostProvision", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) ProvisionOutput(ii *InstanceInfo, s1 string, s2 string) {
+ if dbug == nil {
+ return
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId())
+ buf.WriteString("\n")
+ }
+ buf.WriteString(s1 + "\n")
+ buf.WriteString(s2 + "\n")
+
+ dbug.WriteFile("hook-ProvisionOutput", buf.Bytes())
+}
+
+func (*DebugHook) PreRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String())
+ buf.WriteString("\n")
+ }
+ dbug.WriteFile("hook-PreRefresh", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId())
+ buf.WriteString("\n")
+ }
+
+ if is != nil {
+ buf.WriteString(is.String())
+ buf.WriteString("\n")
+ }
+ dbug.WriteFile("hook-PostRefresh", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PreImportState(ii *InstanceInfo, s string) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+ if ii != nil {
+ buf.WriteString(ii.HumanId())
+ buf.WriteString("\n")
+ }
+ buf.WriteString(s + "\n")
+
+ dbug.WriteFile("hook-PreImportState", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+func (*DebugHook) PostImportState(ii *InstanceInfo, iss []*InstanceState) (HookAction, error) {
+ if dbug == nil {
+ return HookActionContinue, nil
+ }
+
+ var buf bytes.Buffer
+
+ if ii != nil {
+ buf.WriteString(ii.HumanId() + "\n")
+ }
+
+ for _, is := range iss {
+ if is != nil {
+ buf.WriteString(is.String() + "\n")
+ }
+ }
+ dbug.WriteFile("hook-PostImportState", buf.Bytes())
+ return HookActionContinue, nil
+}
+
+// skip logging this for now, since it could be huge
+func (*DebugHook) PostStateUpdate(*State) (HookAction, error) {
+ return HookActionContinue, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go
new file mode 100644
index 00000000..a9fae6c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go
@@ -0,0 +1,866 @@
+package terraform
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/mitchellh/copystructure"
+)
+
+// DiffChangeType is an enum with the kind of changes a diff has planned.
+type DiffChangeType byte
+
+const (
+ DiffInvalid DiffChangeType = iota
+ DiffNone
+ DiffCreate
+ DiffUpdate
+ DiffDestroy
+ DiffDestroyCreate
+)
+
+// multiVal matches the index key to a flatmapped set, list or map
+var multiVal = regexp.MustCompile(`\.(#|%)$`)
+
+// Diff tracks the changes that are necessary to apply a configuration
+// to an existing infrastructure.
+type Diff struct {
+ // Modules contains all the modules that have a diff
+ Modules []*ModuleDiff
+}
+
+// Prune cleans out unused structures in the diff without affecting
+// the behavior of the diff at all.
+//
+// This is not safe to call concurrently. This is safe to call on a
+// nil Diff.
+func (d *Diff) Prune() {
+ if d == nil {
+ return
+ }
+
+ // Prune all empty modules
+ newModules := make([]*ModuleDiff, 0, len(d.Modules))
+ for _, m := range d.Modules {
+ // If the module isn't empty, we keep it
+ if !m.Empty() {
+ newModules = append(newModules, m)
+ }
+ }
+ if len(newModules) == 0 {
+ newModules = nil
+ }
+ d.Modules = newModules
+}
+
+// AddModule adds the module with the given path to the diff.
+//
+// This should be the preferred method to add module diffs since it
+// allows us to optimize lookups later as well as control sorting.
+func (d *Diff) AddModule(path []string) *ModuleDiff {
+ m := &ModuleDiff{Path: path}
+ m.init()
+ d.Modules = append(d.Modules, m)
+ return m
+}
+
+// ModuleByPath is used to look up the module diff for the given path.
+// This should be the preferred lookup mechanism as it allows for future
+// lookup optimizations.
+func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
+ if d == nil {
+ return nil
+ }
+ for _, mod := range d.Modules {
+ if mod.Path == nil {
+ panic("missing module path")
+ }
+ if reflect.DeepEqual(mod.Path, path) {
+ return mod
+ }
+ }
+ return nil
+}
+
+// RootModule returns the ModuleDiff for the root module.
+func (d *Diff) RootModule() *ModuleDiff {
+ root := d.ModuleByPath(rootModulePath)
+ if root == nil {
+ panic("missing root module")
+ }
+ return root
+}
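+
+// A lookup sketch (this assumes the package's rootModulePath of
+// []string{"root"}, so a child module "foo" lives at []string{"root", "foo"}):
+//
+//    root := d.RootModule()                           // panics if missing
+//    child := d.ModuleByPath([]string{"root", "foo"}) // nil if absent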
+
+// Empty returns true if the diff has no changes.
+func (d *Diff) Empty() bool {
+ if d == nil {
+ return true
+ }
+
+ for _, m := range d.Modules {
+ if !m.Empty() {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Equal compares two diffs for exact equality.
+//
+// This is different from the Same comparison, which checks for operation
+// equality while taking computed values into account. Equal instead checks
+// for exact equality.
+func (d *Diff) Equal(d2 *Diff) bool {
+ // If one is nil, they must both be nil
+ if d == nil || d2 == nil {
+ return d == d2
+ }
+
+ // Sort the modules
+ sort.Sort(moduleDiffSort(d.Modules))
+ sort.Sort(moduleDiffSort(d2.Modules))
+
+ // Copy since we have to modify the module destroy flag to false so
+ // we don't compare that. TODO: delete this when we get rid of the
+ // destroy flag on modules.
+ dCopy := d.DeepCopy()
+ d2Copy := d2.DeepCopy()
+ for _, m := range dCopy.Modules {
+ m.Destroy = false
+ }
+ for _, m := range d2Copy.Modules {
+ m.Destroy = false
+ }
+
+ // Use DeepEqual
+ return reflect.DeepEqual(dCopy, d2Copy)
+}
+
+// DeepCopy performs a deep copy of all parts of the Diff, making the
+// resulting Diff safe to use without modifying this one.
+func (d *Diff) DeepCopy() *Diff {
+ copy, err := copystructure.Config{Lock: true}.Copy(d)
+ if err != nil {
+ panic(err)
+ }
+
+ return copy.(*Diff)
+}
+
+func (d *Diff) String() string {
+ var buf bytes.Buffer
+
+ keys := make([]string, 0, len(d.Modules))
+ lookup := make(map[string]*ModuleDiff)
+ for _, m := range d.Modules {
+ key := fmt.Sprintf("module.%s", strings.Join(m.Path[1:], "."))
+ keys = append(keys, key)
+ lookup[key] = m
+ }
+ sort.Strings(keys)
+
+ for _, key := range keys {
+ m := lookup[key]
+ mStr := m.String()
+
+ // If we're the root module, we just write the output directly.
+ if reflect.DeepEqual(m.Path, rootModulePath) {
+ buf.WriteString(mStr + "\n")
+ continue
+ }
+
+ buf.WriteString(fmt.Sprintf("%s:\n", key))
+
+ s := bufio.NewScanner(strings.NewReader(mStr))
+ for s.Scan() {
+ buf.WriteString(fmt.Sprintf(" %s\n", s.Text()))
+ }
+ }
+
+ return strings.TrimSpace(buf.String())
+}
+
+func (d *Diff) init() {
+ if d.Modules == nil {
+ rootDiff := &ModuleDiff{Path: rootModulePath}
+ d.Modules = []*ModuleDiff{rootDiff}
+ }
+ for _, m := range d.Modules {
+ m.init()
+ }
+}
+
+// ModuleDiff tracks the differences between resources to apply within
+// a single module.
+type ModuleDiff struct {
+ Path []string
+ Resources map[string]*InstanceDiff
+ Destroy bool // Set only by the destroy plan
+}
+
+func (d *ModuleDiff) init() {
+ if d.Resources == nil {
+ d.Resources = make(map[string]*InstanceDiff)
+ }
+ for _, r := range d.Resources {
+ r.init()
+ }
+}
+
+// ChangeType returns the type of changes that the diff for this
+// module includes.
+//
+// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or
+// DiffCreate. If an instance within the module has a DiffDestroyCreate
+// then this will register as a DiffUpdate for the module.
+func (d *ModuleDiff) ChangeType() DiffChangeType {
+ result := DiffNone
+ for _, r := range d.Resources {
+ change := r.ChangeType()
+ switch change {
+ case DiffCreate, DiffDestroy:
+ if result == DiffNone {
+ result = change
+ }
+ case DiffDestroyCreate, DiffUpdate:
+ result = DiffUpdate
+ }
+ }
+
+ return result
+}
+
+// Empty returns true if the diff has no changes within this module.
+func (d *ModuleDiff) Empty() bool {
+ if d.Destroy {
+ return false
+ }
+
+ if len(d.Resources) == 0 {
+ return true
+ }
+
+ for _, rd := range d.Resources {
+ if !rd.Empty() {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Instances returns the instance diffs for the id given. This can return
+// multiple instance diffs if there are counts within the resource.
+func (d *ModuleDiff) Instances(id string) []*InstanceDiff {
+ var result []*InstanceDiff
+ for k, diff := range d.Resources {
+ if k == id || strings.HasPrefix(k, id+".") {
+ if !diff.Empty() {
+ result = append(result, diff)
+ }
+ }
+ }
+
+ return result
+}
+
+// IsRoot says whether or not this module diff is for the root module.
+func (d *ModuleDiff) IsRoot() bool {
+ return reflect.DeepEqual(d.Path, rootModulePath)
+}
+
+// String outputs the diff in a long but command-line friendly output
+// format that users can read to quickly inspect a diff.
+func (d *ModuleDiff) String() string {
+ var buf bytes.Buffer
+
+ names := make([]string, 0, len(d.Resources))
+ for name := range d.Resources {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+
+ for _, name := range names {
+ rdiff := d.Resources[name]
+
+ crud := "UPDATE"
+ switch {
+ case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()):
+ crud = "DESTROY/CREATE"
+ case rdiff.GetDestroy() || rdiff.GetDestroyDeposed():
+ crud = "DESTROY"
+ case rdiff.RequiresNew():
+ crud = "CREATE"
+ }
+
+ extra := ""
+ if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() {
+ extra = " (deposed only)"
+ }
+
+ buf.WriteString(fmt.Sprintf(
+ "%s: %s%s\n",
+ crud,
+ name,
+ extra))
+
+ keyLen := 0
+ rdiffAttrs := rdiff.CopyAttributes()
+ keys := make([]string, 0, len(rdiffAttrs))
+ for key := range rdiffAttrs {
+ if key == "id" {
+ continue
+ }
+
+ keys = append(keys, key)
+ if len(key) > keyLen {
+ keyLen = len(key)
+ }
+ }
+ sort.Strings(keys)
+
+ for _, attrK := range keys {
+ attrDiff, _ := rdiff.GetAttribute(attrK)
+
+ v := attrDiff.New
+ u := attrDiff.Old
+ if attrDiff.NewComputed {
+ v = "<computed>"
+ }
+
+ if attrDiff.Sensitive {
+ u = "<sensitive>"
+ v = "<sensitive>"
+ }
+
+ updateMsg := ""
+ if attrDiff.RequiresNew {
+ updateMsg = " (forces new resource)"
+ } else if attrDiff.Sensitive {
+ updateMsg = " (attribute changed)"
+ }
+
+ buf.WriteString(fmt.Sprintf(
+ " %s:%s %#v => %#v%s\n",
+ attrK,
+ strings.Repeat(" ", keyLen-len(attrK)),
+ u,
+ v,
+ updateMsg))
+ }
+ }
+
+ return buf.String()
+}
+
+// InstanceDiff is the diff of a resource from some state to another.
+type InstanceDiff struct {
+ mu sync.Mutex
+ Attributes map[string]*ResourceAttrDiff
+ Destroy bool
+ DestroyDeposed bool
+ DestroyTainted bool
+
+ // Meta is a simple K/V map that is stored in a diff and persisted to
+ // plans but otherwise is completely ignored by Terraform core. It is
+ // meant to be used for additional data a resource may want to pass through.
+ // The value here must only contain Go primitives and collections.
+ Meta map[string]interface{}
+}
+
+func (d *InstanceDiff) Lock() { d.mu.Lock() }
+func (d *InstanceDiff) Unlock() { d.mu.Unlock() }
+
+// ResourceAttrDiff is the diff of a single attribute of a resource.
+type ResourceAttrDiff struct {
+ Old string // Old Value
+ New string // New Value
+ NewComputed bool // True if new value is computed (unknown currently)
+ NewRemoved bool // True if this attribute is being removed
+ NewExtra interface{} // Extra information for the provider
+ RequiresNew bool // True if change requires new resource
+ Sensitive bool // True if the data should not be displayed in UI output
+ Type DiffAttrType
+}
+
+// Empty returns true if the diff for this attr is neutral
+func (d *ResourceAttrDiff) Empty() bool {
+ return d.Old == d.New && !d.NewComputed && !d.NewRemoved
+}
+
+func (d *ResourceAttrDiff) GoString() string {
+ return fmt.Sprintf("*%#v", *d)
+}
+
+// DiffAttrType is an enum type that says whether a resource attribute
+// diff is an input attribute (comes from the configuration) or an
+// output attribute (comes as a result of applying the configuration). An
+// example input would be "ami" for AWS and an example output would be
+// "private_ip".
+type DiffAttrType byte
+
+const (
+ DiffAttrUnknown DiffAttrType = iota
+ DiffAttrInput
+ DiffAttrOutput
+)
+
+func (d *InstanceDiff) init() {
+ if d.Attributes == nil {
+ d.Attributes = make(map[string]*ResourceAttrDiff)
+ }
+}
+
+func NewInstanceDiff() *InstanceDiff {
+ return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)}
+}
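+
+// A construction sketch (the attribute name and values are illustrative):
+//
+//    d := NewInstanceDiff()
+//    d.SetAttribute("ami", &ResourceAttrDiff{
+//        Old: "ami-1", New: "ami-2", RequiresNew: true,
+//    })
+//    d.RequiresNew() // true
+//    d.ChangeType()  // DiffCreate, since no destroy flags are set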
+
+func (d *InstanceDiff) Copy() (*InstanceDiff, error) {
+ if d == nil {
+ return nil, nil
+ }
+
+ dCopy, err := copystructure.Config{Lock: true}.Copy(d)
+ if err != nil {
+ return nil, err
+ }
+
+ return dCopy.(*InstanceDiff), nil
+}
+
+// ChangeType returns the DiffChangeType represented by the diff
+// for this single instance.
+func (d *InstanceDiff) ChangeType() DiffChangeType {
+ if d.Empty() {
+ return DiffNone
+ }
+
+ if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) {
+ return DiffDestroyCreate
+ }
+
+ if d.GetDestroy() || d.GetDestroyDeposed() {
+ return DiffDestroy
+ }
+
+ if d.RequiresNew() {
+ return DiffCreate
+ }
+
+ return DiffUpdate
+}
+
+// Empty returns true if this diff encapsulates no changes.
+func (d *InstanceDiff) Empty() bool {
+ if d == nil {
+ return true
+ }
+
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ return !d.Destroy &&
+ !d.DestroyTainted &&
+ !d.DestroyDeposed &&
+ len(d.Attributes) == 0
+}
+
+// Equal compares two diffs for exact equality.
+//
+// This is different from the Same comparison, which checks for operation
+// equality while taking computed values into account. Equal instead checks
+// for exact equality.
+func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool {
+ // If one is nil, they must both be nil
+ if d == nil || d2 == nil {
+ return d == d2
+ }
+
+ // Use DeepEqual
+ return reflect.DeepEqual(d, d2)
+}
+
+// DeepCopy performs a deep copy of all parts of the InstanceDiff
+func (d *InstanceDiff) DeepCopy() *InstanceDiff {
+ copy, err := copystructure.Config{Lock: true}.Copy(d)
+ if err != nil {
+ panic(err)
+ }
+
+ return copy.(*InstanceDiff)
+}
+
+func (d *InstanceDiff) GoString() string {
+ return fmt.Sprintf("*%#v", InstanceDiff{
+ Attributes: d.Attributes,
+ Destroy: d.Destroy,
+ DestroyTainted: d.DestroyTainted,
+ DestroyDeposed: d.DestroyDeposed,
+ })
+}
+
+// RequiresNew returns true if the diff requires the creation of a new
+// resource (implying the destruction of the old).
+func (d *InstanceDiff) RequiresNew() bool {
+ if d == nil {
+ return false
+ }
+
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ return d.requiresNew()
+}
+
+func (d *InstanceDiff) requiresNew() bool {
+ if d == nil {
+ return false
+ }
+
+ if d.DestroyTainted {
+ return true
+ }
+
+ for _, rd := range d.Attributes {
+ if rd != nil && rd.RequiresNew {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (d *InstanceDiff) GetDestroyDeposed() bool {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ return d.DestroyDeposed
+}
+
+func (d *InstanceDiff) SetDestroyDeposed(b bool) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.DestroyDeposed = b
+}
+
+// These methods are properly locked, for use outside other InstanceDiff
+// methods but anywhere else within the terraform package.
+// TODO refactor the locking scheme
+func (d *InstanceDiff) SetTainted(b bool) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.DestroyTainted = b
+}
+
+func (d *InstanceDiff) GetDestroyTainted() bool {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ return d.DestroyTainted
+}
+
+func (d *InstanceDiff) SetDestroy(b bool) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.Destroy = b
+}
+
+func (d *InstanceDiff) GetDestroy() bool {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ return d.Destroy
+}
+
+func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.Attributes[key] = attr
+}
+
+func (d *InstanceDiff) DelAttribute(key string) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ delete(d.Attributes, key)
+}
+
+func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ attr, ok := d.Attributes[key]
+ return attr, ok
+}
+
+func (d *InstanceDiff) GetAttributesLen() int {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ return len(d.Attributes)
+}
+
+// CopyAttributes safely copies the Attributes map.
+func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ attrs := make(map[string]*ResourceAttrDiff)
+ for k, v := range d.Attributes {
+ attrs[k] = v
+ }
+
+ return attrs
+}
+
+// Same checks whether or not two InstanceDiffs are the "same". When
+// we say "same", it is not necessarily exactly equal. Instead, it is
+// just checking that the same attributes are changing, a destroy
+// isn't suddenly happening, etc.
+func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
+ // we can safely compare the pointers without a lock
+ switch {
+ case d == nil && d2 == nil:
+ return true, ""
+ case d == nil || d2 == nil:
+ return false, "one nil"
+ case d == d2:
+ return true, ""
+ }
+
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ // If we're going from requiring new to NOT requiring new, then we have
+ // to see if all attributes that required new were computed. If so, it is
+ // allowed since computed may also mean "same value and therefore not new".
+ oldNew := d.requiresNew()
+ newNew := d2.RequiresNew()
+ if oldNew && !newNew {
+ oldNew = false
+
+ // This section builds a list of ignorable attributes for requiresNew
+ // by ignoring any elements of collections that are going to zero elements.
+ // For collections going to zero, they may not exist at all in the
+ // new diff (and hence RequiresNew == false).
+ ignoreAttrs := make(map[string]struct{})
+ for k, diffOld := range d.Attributes {
+ if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") {
+ continue
+ }
+
+ // This case is in here as a protection measure. The bug that this
+ // code originally fixed (GH-11349) didn't have to deal with computed
+ // so I'm not 100% sure what the correct behavior is. Best to leave
+ // the old behavior.
+ if diffOld.NewComputed {
+ continue
+ }
+
+ // We're looking for the case a map goes to exactly 0.
+ if diffOld.New != "0" {
+ continue
+ }
+
+ // Found it! Ignore all of these. The prefix here is stripping
+ // off the "%" so it is just "k."
+ prefix := k[:len(k)-1]
+ for k2 := range d.Attributes {
+ if strings.HasPrefix(k2, prefix) {
+ ignoreAttrs[k2] = struct{}{}
+ }
+ }
+ }
+
+ for k, rd := range d.Attributes {
+ if _, ok := ignoreAttrs[k]; ok {
+ continue
+ }
+
+ // If the field is requires new and NOT computed, then what
+ // we have is a diff mismatch for sure. We set that the old
+ // diff does REQUIRE a ForceNew.
+ if rd != nil && rd.RequiresNew && !rd.NewComputed {
+ oldNew = true
+ break
+ }
+ }
+ }
+
+ if oldNew != newNew {
+ return false, fmt.Sprintf(
+ "diff RequiresNew; old: %t, new: %t", oldNew, newNew)
+ }
+
+ // Verify that destroy matches. The second boolean here allows us to
+ // have mismatching Destroy if we're moving from RequiresNew true
+ // to false above. Therefore, the second boolean will only pass if
+ // we're moving from Destroy: true to false as well.
+ if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew {
+ return false, fmt.Sprintf(
+ "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy())
+ }
+
+ // Go through the old diff and make sure the new diff has all the
+ // same attributes. To start, build up the check map to be all the keys.
+ checkOld := make(map[string]struct{})
+ checkNew := make(map[string]struct{})
+ for k := range d.Attributes {
+ checkOld[k] = struct{}{}
+ }
+ for k := range d2.CopyAttributes() {
+ checkNew[k] = struct{}{}
+ }
+
+ // Make an ordered list so we are sure the approximated hashes are left
+ // to process at the end of the loop
+ keys := make([]string, 0, len(d.Attributes))
+ for k := range d.Attributes {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ diffOld := d.Attributes[k]
+
+ if _, ok := checkOld[k]; !ok {
+ // We're not checking this key for whatever reason (see where
+ // check is modified).
+ continue
+ }
+
+ // Remove this key since we'll never hit it again
+ delete(checkOld, k)
+ delete(checkNew, k)
+
+ _, ok := d2.GetAttribute(k)
+ if !ok {
+ // If there's no new attribute, and the old diff expected the attribute
+ // to be removed, that's just fine.
+ if diffOld.NewRemoved {
+ continue
+ }
+
+ // If the last diff was a computed value then the absence of
+ // that value is allowed since it may mean the value ended up
+ // being the same.
+ if diffOld.NewComputed {
+ ok = true
+ }
+
+ // No exact match, but maybe this is a set containing computed
+ // values. So check if there is an approximate hash in the key
+ // and if so, try to match the key.
+ if strings.Contains(k, "~") {
+ parts := strings.Split(k, ".")
+ parts2 := append([]string(nil), parts...)
+
+ re := regexp.MustCompile(`^~\d+$`)
+ for i, part := range parts {
+ if re.MatchString(part) {
+ // we're going to consider this the base of a
+ // computed hash, and remove all longer matching fields
+ ok = true
+
+ parts2[i] = `\d+`
+ parts2 = parts2[:i+1]
+ break
+ }
+ }
+
+ re, err := regexp.Compile("^" + strings.Join(parts2, `\.`))
+ if err != nil {
+ return false, fmt.Sprintf("regexp failed to compile; err: %#v", err)
+ }
+
+ for k2 := range checkNew {
+ if re.MatchString(k2) {
+ delete(checkNew, k2)
+ }
+ }
+ }
+
+ // This is a little tricky, but when a diff contains a computed
+ // list, set, or map that can only be interpolated after the apply
+ // command has created the dependent resources, it could turn out
+ // that the result is actually the same as the existing state which
+ // would remove the key from the diff.
+ if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
+ ok = true
+ }
+
+ // Similarly, in a RequiresNew scenario, a list that shows up in the plan
+ // diff can disappear from the apply diff, which is calculated from an
+ // empty state.
+ if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
+ ok = true
+ }
+
+ if !ok {
+ return false, fmt.Sprintf("attribute mismatch: %s", k)
+ }
+ }
+
+ // search for the suffix of the base of a [computed] map, list or set.
+ match := multiVal.FindStringSubmatch(k)
+
+ if diffOld.NewComputed && len(match) == 2 {
+ matchLen := len(match[1])
+
+ // This is a computed list, set, or map, so remove any keys with
+ // this prefix from the check list.
+ kprefix := k[:len(k)-matchLen]
+ for k2 := range checkOld {
+ if strings.HasPrefix(k2, kprefix) {
+ delete(checkOld, k2)
+ }
+ }
+ for k2 := range checkNew {
+ if strings.HasPrefix(k2, kprefix) {
+ delete(checkNew, k2)
+ }
+ }
+ }
+
+ // TODO: check for the same value if not computed
+ }
+
+ // Check for leftover attributes
+ if len(checkNew) > 0 {
+ extras := make([]string, 0, len(checkNew))
+ for attr := range checkNew {
+ extras = append(extras, attr)
+ }
+ return false,
+ fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", "))
+ }
+
+ return true, ""
+}
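+
+// A usage sketch (planDiff and applyDiff are assumed *InstanceDiff values
+// from the plan and apply phases):
+//
+//    if same, reason := planDiff.Same(applyDiff); !same {
+//        log.Printf("[ERROR] diffs don't match: %s", reason)
+//    }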
+
+// moduleDiffSort implements sort.Interface to sort module diffs by path.
+type moduleDiffSort []*ModuleDiff
+
+func (s moduleDiffSort) Len() int { return len(s) }
+func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s moduleDiffSort) Less(i, j int) bool {
+ a := s[i]
+ b := s[j]
+
+ // If the lengths are different, then the shorter one always wins
+ if len(a.Path) != len(b.Path) {
+ return len(a.Path) < len(b.Path)
+ }
+
+ // Otherwise, compare lexically
+ return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
+}
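+
+// Usage mirrors Diff.Equal above:
+//
+//    sort.Sort(moduleDiffSort(d.Modules))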
diff --git a/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
new file mode 100644
index 00000000..bc9d638a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
@@ -0,0 +1,17 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// DestroyEdge is an edge that represents a standard "destroy" relationship:
+// Target depends on Source because Source is destroying.
+type DestroyEdge struct {
+ S, T dag.Vertex
+}
+
+func (e *DestroyEdge) Hashcode() interface{} { return fmt.Sprintf("%p-%p", e.S, e.T) }
+func (e *DestroyEdge) Source() dag.Vertex { return e.S }
+func (e *DestroyEdge) Target() dag.Vertex { return e.T }
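+
+// A construction sketch (g is an assumed dag-based graph whose Connect
+// method accepts an edge; the vertex names are illustrative):
+//
+//    g.Connect(&DestroyEdge{S: destroyNode, T: dependentNode})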
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go
new file mode 100644
index 00000000..3cb088a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval.go
@@ -0,0 +1,63 @@
+package terraform
+
+import (
+ "log"
+ "strings"
+)
+
+// EvalNode is the interface that must be implemented by graph nodes to
+// evaluate/execute.
+type EvalNode interface {
+ // Eval evaluates this node with the given context.
+ Eval(EvalContext) (interface{}, error)
+}
+
+// GraphNodeEvalable is the interface that graph nodes must implement
+// to enable evaluation.
+type GraphNodeEvalable interface {
+ EvalTree() EvalNode
+}
+
+// EvalEarlyExitError is a special error return value that can be returned
+// by eval nodes to signal an early exit.
+
+func (EvalEarlyExitError) Error() string { return "early exit" }
+
+// Eval evaluates the given EvalNode with the given context, treating an
+// early exit as a normal, successful return.
+func Eval(n EvalNode, ctx EvalContext) (interface{}, error) {
+ // Call the lower level eval which doesn't understand early exit,
+ // and if we early exit, it isn't an error.
+ result, err := EvalRaw(n, ctx)
+ if err != nil {
+ if _, ok := err.(EvalEarlyExitError); ok {
+ return nil, nil
+ }
+ }
+
+ return result, err
+}
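+
+// A usage sketch (node is an assumed GraphNodeEvalable and ctx an
+// EvalContext):
+//
+//    result, err := Eval(node.EvalTree(), ctx)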
+
+// EvalRaw is like Eval except that it returns all errors, even if they
+// signal something normal such as EvalEarlyExitError.
+func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
+ path := "unknown"
+ if ctx != nil {
+ path = strings.Join(ctx.Path(), ".")
+ }
+
+ log.Printf("[DEBUG] %s: eval: %T", path, n)
+ output, err := n.Eval(ctx)
+ if err != nil {
+ if _, ok := err.(EvalEarlyExitError); ok {
+ log.Printf("[DEBUG] %s: eval: %T, err: %s", path, n, err)
+ } else {
+ log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err)
+ }
+ }
+
+ return output, err
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
new file mode 100644
index 00000000..2f6a4973
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
@@ -0,0 +1,359 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "strconv"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalApply is an EvalNode implementation that applies the given diff
+// by calling the provider's Apply method, recording the resulting state.
+type EvalApply struct {
+ Info *InstanceInfo
+ State **InstanceState
+ Diff **InstanceDiff
+ Provider *ResourceProvider
+ Output **InstanceState
+ CreateNew *bool
+ Error *error
+}
+
+// TODO: test
+func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
+ diff := *n.Diff
+ provider := *n.Provider
+ state := *n.State
+
+ // If we have no diff, we have nothing to do!
+ if diff.Empty() {
+ log.Printf(
+ "[DEBUG] apply: %s: diff is empty, doing nothing.", n.Info.Id)
+ return nil, nil
+ }
+
+ // Remove any output values from the diff
+ for k, ad := range diff.CopyAttributes() {
+ if ad.Type == DiffAttrOutput {
+ diff.DelAttribute(k)
+ }
+ }
+
+ // If the state is nil, make it non-nil
+ if state == nil {
+ state = new(InstanceState)
+ }
+ state.init()
+
+ // Flag if we're creating a new instance
+ if n.CreateNew != nil {
+ *n.CreateNew = state.ID == "" && !diff.GetDestroy() || diff.RequiresNew()
+ }
+
+ // With the completed diff, apply!
+ log.Printf("[DEBUG] apply: %s: executing Apply", n.Info.Id)
+ state, err := provider.Apply(n.Info, state, diff)
+ if state == nil {
+ state = new(InstanceState)
+ }
+ state.init()
+
+ // Force the "id" attribute to be our ID
+ if state.ID != "" {
+ state.Attributes["id"] = state.ID
+ }
+
+ // If the value is the unknown variable value, then it is an error.
+ // In this case we record the error and remove it from the state
+ for ak, av := range state.Attributes {
+ if av == config.UnknownVariableValue {
+ err = multierror.Append(err, fmt.Errorf(
+ "Attribute with unknown value: %s", ak))
+ delete(state.Attributes, ak)
+ }
+ }
+
+ // Write the final state
+ if n.Output != nil {
+ *n.Output = state
+ }
+
+ // If there was an error, we append it to our output error
+ // if we have one, otherwise we just return it.
+ if err != nil {
+ if n.Error != nil {
+ helpfulErr := fmt.Errorf("%s: %s", n.Info.Id, err.Error())
+ *n.Error = multierror.Append(*n.Error, helpfulErr)
+ } else {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+// EvalApplyPre is an EvalNode implementation that does the pre-Apply work
+type EvalApplyPre struct {
+ Info *InstanceInfo
+ State **InstanceState
+ Diff **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
+ state := *n.State
+ diff := *n.Diff
+
+ // If the state is nil, make it non-nil
+ if state == nil {
+ state = new(InstanceState)
+ }
+ state.init()
+
+ {
+ // Call pre-apply hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreApply(n.Info, state, diff)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+// EvalApplyPost is an EvalNode implementation that does the post-Apply work
+type EvalApplyPost struct {
+ Info *InstanceInfo
+ State **InstanceState
+ Error *error
+}
+
+// TODO: test
+func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) {
+ state := *n.State
+
+ {
+ // Call post-apply hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostApply(n.Info, state, *n.Error)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, *n.Error
+}
+
+// EvalApplyProvisioners is an EvalNode implementation that executes
+// the provisioners for a resource.
+//
+// TODO(mitchellh): This should probably be split up into a more fine-grained
+// ApplyProvisioner (single) that is looped over.
+type EvalApplyProvisioners struct {
+ Info *InstanceInfo
+ State **InstanceState
+ Resource *config.Resource
+ InterpResource *Resource
+ CreateNew *bool
+ Error *error
+
+ // When is the type of provisioner to run at this point
+ When config.ProvisionerWhen
+}
+
+// TODO: test
+func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
+ state := *n.State
+
+ if n.CreateNew != nil && !*n.CreateNew {
+ // If we're not creating a new resource, then don't run provisioners
+ return nil, nil
+ }
+
+ provs := n.filterProvisioners()
+ if len(provs) == 0 {
+ // We have no provisioners, so don't do anything
+ return nil, nil
+ }
+
+ // taint tells us whether to enable tainting.
+ taint := n.When == config.ProvisionerWhenCreate
+
+ if n.Error != nil && *n.Error != nil {
+ if taint {
+ state.Tainted = true
+ }
+
+ // An earlier error occurred, so skip provisioning and return.
+ return nil, nil
+ }
+
+ {
+ // Call pre hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreProvisionResource(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Run the provisioners. If there was an error, we append it to our
+ // output error if we have one, otherwise we just return it.
+ err := n.apply(ctx, provs)
+ if err != nil {
+ if taint {
+ state.Tainted = true
+ }
+
+ if n.Error != nil {
+ *n.Error = multierror.Append(*n.Error, err)
+ } else {
+ return nil, err
+ }
+ }
+
+ {
+ // Call post hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostProvisionResource(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+// filterProvisioners filters the provisioners on the resource to only
+// the provisioners specified by the "when" option.
+func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner {
+ // Fast path the zero case
+ if n.Resource == nil {
+ return nil
+ }
+
+ if len(n.Resource.Provisioners) == 0 {
+ return nil
+ }
+
+ result := make([]*config.Provisioner, 0, len(n.Resource.Provisioners))
+ for _, p := range n.Resource.Provisioners {
+ if p.When == n.When {
+ result = append(result, p)
+ }
+ }
+
+ return result
+}
+
+func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provisioner) error {
+ state := *n.State
+
+ // Store the original connection info, restore later
+ origConnInfo := state.Ephemeral.ConnInfo
+ defer func() {
+ state.Ephemeral.ConnInfo = origConnInfo
+ }()
+
+ for _, prov := range provs {
+ // Get the provisioner
+ provisioner := ctx.Provisioner(prov.Type)
+
+ // Interpolate the provisioner config
+ provConfig, err := ctx.Interpolate(prov.RawConfig.Copy(), n.InterpResource)
+ if err != nil {
+ return err
+ }
+
+ // Interpolate the conn info, since it may contain variables
+ connInfo, err := ctx.Interpolate(prov.ConnInfo.Copy(), n.InterpResource)
+ if err != nil {
+ return err
+ }
+
+ // Merge the connection information
+ overlay := make(map[string]string)
+ if origConnInfo != nil {
+ for k, v := range origConnInfo {
+ overlay[k] = v
+ }
+ }
+ for k, v := range connInfo.Config {
+ switch vt := v.(type) {
+ case string:
+ overlay[k] = vt
+ case int64:
+ overlay[k] = strconv.FormatInt(vt, 10)
+ case int32:
+ overlay[k] = strconv.FormatInt(int64(vt), 10)
+ case int:
+ overlay[k] = strconv.FormatInt(int64(vt), 10)
+ case float32:
+ overlay[k] = strconv.FormatFloat(float64(vt), 'f', 3, 32)
+ case float64:
+ overlay[k] = strconv.FormatFloat(vt, 'f', 3, 64)
+ case bool:
+ overlay[k] = strconv.FormatBool(vt)
+ default:
+ overlay[k] = fmt.Sprintf("%v", vt)
+ }
+ }
+ state.Ephemeral.ConnInfo = overlay
+
+ {
+ // Call pre hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreProvision(n.Info, prov.Type)
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ // The output function
+ outputFn := func(msg string) {
+ ctx.Hook(func(h Hook) (HookAction, error) {
+ h.ProvisionOutput(n.Info, prov.Type, msg)
+ return HookActionContinue, nil
+ })
+ }
+
+ // Invoke the Provisioner
+ output := CallbackUIOutput{OutputFn: outputFn}
+ applyErr := provisioner.Apply(&output, state, provConfig)
+
+ // Call post hook
+ hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostProvision(n.Info, prov.Type, applyErr)
+ })
+
+ // Handle the error before we deal with the hook
+ if applyErr != nil {
+ // Determine failure behavior
+ switch prov.OnFailure {
+ case config.ProvisionerOnFailureContinue:
+ log.Printf(
+ "[INFO] apply: %s [%s]: error during provision, continue requested",
+ n.Info.Id, prov.Type)
+
+ case config.ProvisionerOnFailureFail:
+ return applyErr
+ }
+ }
+
+ // Deal with the hook
+ if hookErr != nil {
+ return hookErr
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
new file mode 100644
index 00000000..715e79e1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
@@ -0,0 +1,38 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalCheckPreventDestroy is an EvalNode implementation that returns an
+// error if a resource has PreventDestroy configured and the diff
+// would destroy the resource.
+type EvalCheckPreventDestroy struct {
+ Resource *config.Resource
+ ResourceId string
+ Diff **InstanceDiff
+}
+
+func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) {
+ if n.Diff == nil || *n.Diff == nil || n.Resource == nil {
+ return nil, nil
+ }
+
+ diff := *n.Diff
+ preventDestroy := n.Resource.Lifecycle.PreventDestroy
+
+ if diff.GetDestroy() && preventDestroy {
+ resourceId := n.ResourceId
+ if resourceId == "" {
+ resourceId = n.Resource.Id()
+ }
+
+ return nil, fmt.Errorf(preventDestroyErrStr, resourceId)
+ }
+
+ return nil, nil
+}
+
+const preventDestroyErrStr = `%s: the plan would destroy this resource, but it currently has lifecycle.prevent_destroy set to true. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or adjust the scope of the plan using the -target flag.`
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
new file mode 100644
index 00000000..a1f815b7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
@@ -0,0 +1,84 @@
+package terraform
+
+import (
+ "sync"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalContext is the interface that is given to eval nodes to execute.
+type EvalContext interface {
+ // Stopped returns a channel that is closed when evaluation is stopped
+ // via Terraform.Context.Stop()
+ Stopped() <-chan struct{}
+
+ // Path is the current module path.
+ Path() []string
+
+ // Hook is used to call hook methods. The callback is called for each
+ // hook and should return the hook action to take and the error.
+ Hook(func(Hook) (HookAction, error)) error
+
+ // Input is the UIInput object for interacting with the UI.
+ Input() UIInput
+
+ // InitProvider initializes the provider with the given name and
+ // returns the implementation of the resource provider or an error.
+ //
+ // It is an error to initialize the same provider more than once.
+ InitProvider(string) (ResourceProvider, error)
+
+ // Provider gets the provider instance with the given name (already
+ // initialized) or returns nil if the provider isn't initialized.
+ Provider(string) ResourceProvider
+
+ // CloseProvider closes provider connections that aren't needed anymore.
+ CloseProvider(string) error
+
+ // ConfigureProvider configures the provider with the given
+ // configuration. This is a separate context call because this call
+ // is used to store the provider configuration for inheritance lookups
+ // with ParentProviderConfig().
+ ConfigureProvider(string, *ResourceConfig) error
+ SetProviderConfig(string, *ResourceConfig) error
+ ParentProviderConfig(string) *ResourceConfig
+
+ // ProviderInput and SetProviderInput are used to configure providers
+ // from user input.
+ ProviderInput(string) map[string]interface{}
+ SetProviderInput(string, map[string]interface{})
+
+ // InitProvisioner initializes the provisioner with the given name and
+ // returns the implementation of the resource provisioner or an error.
+ //
+ // It is an error to initialize the same provisioner more than once.
+ InitProvisioner(string) (ResourceProvisioner, error)
+
+ // Provisioner gets the provisioner instance with the given name (already
+ // initialized) or returns nil if the provisioner isn't initialized.
+ Provisioner(string) ResourceProvisioner
+
+ // CloseProvisioner closes provisioner connections that aren't needed
+ // anymore.
+ CloseProvisioner(string) error
+
+ // Interpolate takes the given raw configuration and completes
+ // the interpolations, returning the processed ResourceConfig.
+ //
+ // The resource argument is optional. If given, it is the resource
+ // that is currently being acted upon.
+ Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error)
+
+ // SetVariables sets the variables for the module within
+ // this context with the name n. This function call is additive:
+ // the second parameter is merged with any previous call.
+ SetVariables(string, map[string]interface{})
+
+ // Diff returns the global diff as well as the lock that should
+ // be used to modify that diff.
+ Diff() (*Diff, *sync.RWMutex)
+
+ // State returns the global state as well as the lock that should
+ // be used to modify that state.
+ State() (*State, *sync.RWMutex)
+}
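+
+// An implementation sketch (exampleNode is hypothetical and only shows how
+// an EvalNode consumes the context):
+//
+//    func (n *exampleNode) Eval(ctx EvalContext) (interface{}, error) {
+//        state, lock := ctx.State()
+//        lock.RLock()
+//        defer lock.RUnlock()
+//        log.Printf("[DEBUG] %s: %d module(s) in state",
+//            strings.Join(ctx.Path(), "."), len(state.Modules))
+//        return nil, nil
+//    }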
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
new file mode 100644
index 00000000..3dcfb227
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
@@ -0,0 +1,347 @@
+package terraform
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// BuiltinEvalContext is an EvalContext implementation that is used by
+// Terraform by default.
+type BuiltinEvalContext struct {
+ // StopContext is the context used to track whether we're complete
+ StopContext context.Context
+
+ // PathValue is the Path that this context is operating within.
+ PathValue []string
+
+ // The Interpolater settings below affect the interpolation of variables.
+ //
+ // The InterpolaterVars are the exact value for ${var.foo} values.
+ // The map is shared between all contexts and is a mapping of
+ // PATH to KEY to VALUE. Because it is shared by all contexts as well
+ // as the Interpolater itself, it is protected by InterpolaterVarLock
+ // which must be locked during any access to the map.
+ Interpolater *Interpolater
+ InterpolaterVars map[string]map[string]interface{}
+ InterpolaterVarLock *sync.Mutex
+
+ Components contextComponentFactory
+ Hooks []Hook
+ InputValue UIInput
+ ProviderCache map[string]ResourceProvider
+ ProviderConfigCache map[string]*ResourceConfig
+ ProviderInputConfig map[string]map[string]interface{}
+ ProviderLock *sync.Mutex
+ ProvisionerCache map[string]ResourceProvisioner
+ ProvisionerLock *sync.Mutex
+ DiffValue *Diff
+ DiffLock *sync.RWMutex
+ StateValue *State
+ StateLock *sync.RWMutex
+
+ once sync.Once
+}
+
+func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} {
+ // This can happen during tests. Returning a nil channel blocks forever,
+ // which is fine there.
+ if ctx.StopContext == nil {
+ return nil
+ }
+
+ return ctx.StopContext.Done()
+}
+
+func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error {
+ for _, h := range ctx.Hooks {
+ action, err := fn(h)
+ if err != nil {
+ return err
+ }
+
+ switch action {
+ case HookActionContinue:
+ continue
+ case HookActionHalt:
+ // Return an early exit error to trigger an early exit
+ log.Printf("[WARN] Early exit triggered by hook: %T", h)
+ return EvalEarlyExitError{}
+ }
+ }
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) Input() UIInput {
+ return ctx.InputValue
+}
+
+func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error) {
+ ctx.once.Do(ctx.init)
+
+ // If we already initialized, it is an error
+ if p := ctx.Provider(n); p != nil {
+ return nil, fmt.Errorf("Provider '%s' already initialized", n)
+ }
+
+ // Warning: make sure to acquire these locks AFTER the call to Provider
+ // above, since it also acquires locks.
+ ctx.ProviderLock.Lock()
+ defer ctx.ProviderLock.Unlock()
+
+ providerPath := make([]string, len(ctx.Path())+1)
+ copy(providerPath, ctx.Path())
+ providerPath[len(providerPath)-1] = n
+ key := PathCacheKey(providerPath)
+
+ typeName := strings.SplitN(n, ".", 2)[0]
+ p, err := ctx.Components.ResourceProvider(typeName, key)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx.ProviderCache[key] = p
+ return p, nil
+}
+
+func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider {
+ ctx.once.Do(ctx.init)
+
+ ctx.ProviderLock.Lock()
+ defer ctx.ProviderLock.Unlock()
+
+ providerPath := make([]string, len(ctx.Path())+1)
+ copy(providerPath, ctx.Path())
+ providerPath[len(providerPath)-1] = n
+
+ return ctx.ProviderCache[PathCacheKey(providerPath)]
+}
+
+func (ctx *BuiltinEvalContext) CloseProvider(n string) error {
+ ctx.once.Do(ctx.init)
+
+ ctx.ProviderLock.Lock()
+ defer ctx.ProviderLock.Unlock()
+
+ providerPath := make([]string, len(ctx.Path())+1)
+ copy(providerPath, ctx.Path())
+ providerPath[len(providerPath)-1] = n
+
+ var provider interface{}
+ provider = ctx.ProviderCache[PathCacheKey(providerPath)]
+ if provider != nil {
+ if p, ok := provider.(ResourceProviderCloser); ok {
+ delete(ctx.ProviderCache, PathCacheKey(providerPath))
+ return p.Close()
+ }
+ }
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) ConfigureProvider(
+ n string, cfg *ResourceConfig) error {
+ p := ctx.Provider(n)
+ if p == nil {
+ return fmt.Errorf("Provider '%s' not initialized", n)
+ }
+
+ if err := ctx.SetProviderConfig(n, cfg); err != nil {
+ return err
+ }
+
+ return p.Configure(cfg)
+}
+
+func (ctx *BuiltinEvalContext) SetProviderConfig(
+ n string, cfg *ResourceConfig) error {
+ providerPath := make([]string, len(ctx.Path())+1)
+ copy(providerPath, ctx.Path())
+ providerPath[len(providerPath)-1] = n
+
+ // Save the configuration
+ ctx.ProviderLock.Lock()
+ ctx.ProviderConfigCache[PathCacheKey(providerPath)] = cfg
+ ctx.ProviderLock.Unlock()
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} {
+ ctx.ProviderLock.Lock()
+ defer ctx.ProviderLock.Unlock()
+
+ // Make a copy of the path so we can safely edit it
+ path := ctx.Path()
+ pathCopy := make([]string, len(path)+1)
+ copy(pathCopy, path)
+
+ // Go up the tree.
+ for i := len(path) - 1; i >= 0; i-- {
+ pathCopy[i+1] = n
+ k := PathCacheKey(pathCopy[:i+2])
+ if v, ok := ctx.ProviderInputConfig[k]; ok {
+ return v
+ }
+ }
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface{}) {
+ providerPath := make([]string, len(ctx.Path())+1)
+ copy(providerPath, ctx.Path())
+ providerPath[len(providerPath)-1] = n
+
+ // Save the configuration
+ ctx.ProviderLock.Lock()
+ ctx.ProviderInputConfig[PathCacheKey(providerPath)] = c
+ ctx.ProviderLock.Unlock()
+}
+
+func (ctx *BuiltinEvalContext) ParentProviderConfig(n string) *ResourceConfig {
+ ctx.ProviderLock.Lock()
+ defer ctx.ProviderLock.Unlock()
+
+ // Make a copy of the path so we can safely edit it
+ path := ctx.Path()
+ pathCopy := make([]string, len(path)+1)
+ copy(pathCopy, path)
+
+ // Go up the tree.
+ for i := len(path) - 1; i >= 0; i-- {
+ pathCopy[i+1] = n
+ k := PathCacheKey(pathCopy[:i+2])
+ if v, ok := ctx.ProviderConfigCache[k]; ok {
+ return v
+ }
+ }
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) InitProvisioner(
+ n string) (ResourceProvisioner, error) {
+ ctx.once.Do(ctx.init)
+
+ // If we already initialized, it is an error
+ if p := ctx.Provisioner(n); p != nil {
+ return nil, fmt.Errorf("Provisioner '%s' already initialized", n)
+ }
+
+ // Warning: make sure to acquire these locks AFTER the call to Provisioner
+ // above, since it also acquires locks.
+ ctx.ProvisionerLock.Lock()
+ defer ctx.ProvisionerLock.Unlock()
+
+ provPath := make([]string, len(ctx.Path())+1)
+ copy(provPath, ctx.Path())
+ provPath[len(provPath)-1] = n
+ key := PathCacheKey(provPath)
+
+ p, err := ctx.Components.ResourceProvisioner(n, key)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx.ProvisionerCache[key] = p
+ return p, nil
+}
+
+func (ctx *BuiltinEvalContext) Provisioner(n string) ResourceProvisioner {
+ ctx.once.Do(ctx.init)
+
+ ctx.ProvisionerLock.Lock()
+ defer ctx.ProvisionerLock.Unlock()
+
+ provPath := make([]string, len(ctx.Path())+1)
+ copy(provPath, ctx.Path())
+ provPath[len(provPath)-1] = n
+
+ return ctx.ProvisionerCache[PathCacheKey(provPath)]
+}
+
+func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error {
+ ctx.once.Do(ctx.init)
+
+ ctx.ProvisionerLock.Lock()
+ defer ctx.ProvisionerLock.Unlock()
+
+ provPath := make([]string, len(ctx.Path())+1)
+ copy(provPath, ctx.Path())
+ provPath[len(provPath)-1] = n
+
+ prov := ctx.ProvisionerCache[PathCacheKey(provPath)]
+ if prov != nil {
+ if p, ok := prov.(ResourceProvisionerCloser); ok {
+ delete(ctx.ProvisionerCache, PathCacheKey(provPath))
+ return p.Close()
+ }
+ }
+
+ return nil
+}
+
+func (ctx *BuiltinEvalContext) Interpolate(
+ cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) {
+ if cfg != nil {
+ scope := &InterpolationScope{
+ Path: ctx.Path(),
+ Resource: r,
+ }
+
+ vs, err := ctx.Interpolater.Values(scope, cfg.Variables)
+ if err != nil {
+ return nil, err
+ }
+
+ // Do the interpolation
+ if err := cfg.Interpolate(vs); err != nil {
+ return nil, err
+ }
+ }
+
+ result := NewResourceConfig(cfg)
+ result.interpolateForce()
+ return result, nil
+}
+
+func (ctx *BuiltinEvalContext) Path() []string {
+ return ctx.PathValue
+}
+
+func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]interface{}) {
+ ctx.InterpolaterVarLock.Lock()
+ defer ctx.InterpolaterVarLock.Unlock()
+
+ path := make([]string, len(ctx.Path())+1)
+ copy(path, ctx.Path())
+ path[len(path)-1] = n
+ key := PathCacheKey(path)
+
+ vars := ctx.InterpolaterVars[key]
+ if vars == nil {
+ vars = make(map[string]interface{})
+ ctx.InterpolaterVars[key] = vars
+ }
+
+ for k, v := range vs {
+ vars[k] = v
+ }
+}
+
+func (ctx *BuiltinEvalContext) Diff() (*Diff, *sync.RWMutex) {
+ return ctx.DiffValue, ctx.DiffLock
+}
+
+func (ctx *BuiltinEvalContext) State() (*State, *sync.RWMutex) {
+ return ctx.StateValue, ctx.StateLock
+}
+
+func (ctx *BuiltinEvalContext) init() {
+}
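Aside: the lookup loops in ProviderInput and ParentProviderConfig above probe the module tree from the current module upward. A sketch of the probe order, using a hypothetical module path and provider name:

	// Given ctx.Path() == []string{"root", "mod_a", "mod_b"} and n == "aws",
	// the loop builds PathCacheKey candidates in this order:
	//
	//   ["root", "mod_a", "mod_b", "aws"]  // current module first
	//   ["root", "mod_a", "aws"]           // then its parent
	//   ["root", "aws"]                    // finally the root module
	//
	// The first key present in the cache wins, so provider input or
	// configuration captured in an ancestor module is inherited by all
	// of its descendant modules.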
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
new file mode 100644
index 00000000..4f90d5b1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
@@ -0,0 +1,208 @@
+package terraform
+
+import (
+ "sync"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// MockEvalContext is a mock version of EvalContext that can be used
+// for tests.
+type MockEvalContext struct {
+ StoppedCalled bool
+ StoppedValue <-chan struct{}
+
+ HookCalled bool
+ HookHook Hook
+ HookError error
+
+ InputCalled bool
+ InputInput UIInput
+
+ InitProviderCalled bool
+ InitProviderName string
+ InitProviderProvider ResourceProvider
+ InitProviderError error
+
+ ProviderCalled bool
+ ProviderName string
+ ProviderProvider ResourceProvider
+
+ CloseProviderCalled bool
+ CloseProviderName string
+ CloseProviderProvider ResourceProvider
+
+ ProviderInputCalled bool
+ ProviderInputName string
+ ProviderInputConfig map[string]interface{}
+
+ SetProviderInputCalled bool
+ SetProviderInputName string
+ SetProviderInputConfig map[string]interface{}
+
+ ConfigureProviderCalled bool
+ ConfigureProviderName string
+ ConfigureProviderConfig *ResourceConfig
+ ConfigureProviderError error
+
+ SetProviderConfigCalled bool
+ SetProviderConfigName string
+ SetProviderConfigConfig *ResourceConfig
+
+ ParentProviderConfigCalled bool
+ ParentProviderConfigName string
+ ParentProviderConfigConfig *ResourceConfig
+
+ InitProvisionerCalled bool
+ InitProvisionerName string
+ InitProvisionerProvisioner ResourceProvisioner
+ InitProvisionerError error
+
+ ProvisionerCalled bool
+ ProvisionerName string
+ ProvisionerProvisioner ResourceProvisioner
+
+ CloseProvisionerCalled bool
+ CloseProvisionerName string
+ CloseProvisionerProvisioner ResourceProvisioner
+
+ InterpolateCalled bool
+ InterpolateConfig *config.RawConfig
+ InterpolateResource *Resource
+ InterpolateConfigResult *ResourceConfig
+ InterpolateError error
+
+ PathCalled bool
+ PathPath []string
+
+ SetVariablesCalled bool
+ SetVariablesModule string
+ SetVariablesVariables map[string]interface{}
+
+ DiffCalled bool
+ DiffDiff *Diff
+ DiffLock *sync.RWMutex
+
+ StateCalled bool
+ StateState *State
+ StateLock *sync.RWMutex
+}
+
+func (c *MockEvalContext) Stopped() <-chan struct{} {
+ c.StoppedCalled = true
+ return c.StoppedValue
+}
+
+func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error {
+ c.HookCalled = true
+ if c.HookHook != nil {
+ if _, err := fn(c.HookHook); err != nil {
+ return err
+ }
+ }
+
+ return c.HookError
+}
+
+func (c *MockEvalContext) Input() UIInput {
+ c.InputCalled = true
+ return c.InputInput
+}
+
+func (c *MockEvalContext) InitProvider(n string) (ResourceProvider, error) {
+ c.InitProviderCalled = true
+ c.InitProviderName = n
+ return c.InitProviderProvider, c.InitProviderError
+}
+
+func (c *MockEvalContext) Provider(n string) ResourceProvider {
+ c.ProviderCalled = true
+ c.ProviderName = n
+ return c.ProviderProvider
+}
+
+func (c *MockEvalContext) CloseProvider(n string) error {
+ c.CloseProviderCalled = true
+ c.CloseProviderName = n
+ return nil
+}
+
+func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error {
+ c.ConfigureProviderCalled = true
+ c.ConfigureProviderName = n
+ c.ConfigureProviderConfig = cfg
+ return c.ConfigureProviderError
+}
+
+func (c *MockEvalContext) SetProviderConfig(
+ n string, cfg *ResourceConfig) error {
+ c.SetProviderConfigCalled = true
+ c.SetProviderConfigName = n
+ c.SetProviderConfigConfig = cfg
+ return nil
+}
+
+func (c *MockEvalContext) ParentProviderConfig(n string) *ResourceConfig {
+ c.ParentProviderConfigCalled = true
+ c.ParentProviderConfigName = n
+ return c.ParentProviderConfigConfig
+}
+
+func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} {
+ c.ProviderInputCalled = true
+ c.ProviderInputName = n
+ return c.ProviderInputConfig
+}
+
+func (c *MockEvalContext) SetProviderInput(n string, cfg map[string]interface{}) {
+ c.SetProviderInputCalled = true
+ c.SetProviderInputName = n
+ c.SetProviderInputConfig = cfg
+}
+
+func (c *MockEvalContext) InitProvisioner(n string) (ResourceProvisioner, error) {
+ c.InitProvisionerCalled = true
+ c.InitProvisionerName = n
+ return c.InitProvisionerProvisioner, c.InitProvisionerError
+}
+
+func (c *MockEvalContext) Provisioner(n string) ResourceProvisioner {
+ c.ProvisionerCalled = true
+ c.ProvisionerName = n
+ return c.ProvisionerProvisioner
+}
+
+func (c *MockEvalContext) CloseProvisioner(n string) error {
+ c.CloseProvisionerCalled = true
+ c.CloseProvisionerName = n
+ return nil
+}
+
+func (c *MockEvalContext) Interpolate(
+ config *config.RawConfig, resource *Resource) (*ResourceConfig, error) {
+ c.InterpolateCalled = true
+ c.InterpolateConfig = config
+ c.InterpolateResource = resource
+ return c.InterpolateConfigResult, c.InterpolateError
+}
+
+func (c *MockEvalContext) Path() []string {
+ c.PathCalled = true
+ return c.PathPath
+}
+
+func (c *MockEvalContext) SetVariables(n string, vs map[string]interface{}) {
+ c.SetVariablesCalled = true
+ c.SetVariablesModule = n
+ c.SetVariablesVariables = vs
+}
+
+func (c *MockEvalContext) Diff() (*Diff, *sync.RWMutex) {
+ c.DiffCalled = true
+ return c.DiffDiff, c.DiffLock
+}
+
+func (c *MockEvalContext) State() (*State, *sync.RWMutex) {
+ c.StateCalled = true
+ return c.StateState, c.StateLock
+}
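As its doc comment says, MockEvalContext is intended for tests: it records which methods were called and with which arguments, and returns whatever canned values its fields hold. A minimal test sketch (in a package terraform test file with "testing" imported; the test name is illustrative), driving the EvalInterpolate node added later in this diff:

	func TestEvalInterpolate_recordsCall(t *testing.T) {
		ctx := &MockEvalContext{
			InterpolateConfigResult: NewResourceConfig(nil),
		}

		node := &EvalInterpolate{}
		if _, err := node.Eval(ctx); err != nil {
			t.Fatalf("err: %s", err)
		}

		// The mock recorded the call, so the test can assert on it.
		if !ctx.InterpolateCalled {
			t.Fatal("expected Interpolate to be called")
		}
	}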
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
new file mode 100644
index 00000000..2ae56a75
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
@@ -0,0 +1,58 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalCountFixZeroOneBoundary is an EvalNode that fixes up the state
+// when a resource count crosses the zero/one boundary, i.e. renaming a
+// resource from "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
+type EvalCountFixZeroOneBoundary struct {
+ Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalCountFixZeroOneBoundary) Eval(ctx EvalContext) (interface{}, error) {
+ // Get the count, important for knowing whether we're supposed to
+ // be adding the ".0" index or trimming it.
+ count, err := n.Resource.Count()
+ if err != nil {
+ return nil, err
+ }
+
+ // Figure what to look for and what to replace it with
+ hunt := n.Resource.Id()
+ replace := hunt + ".0"
+ if count < 2 {
+ hunt, replace = replace, hunt
+ }
+
+ state, lock := ctx.State()
+
+ // Get a lock so we can access this instance and potentially make
+ // changes to it.
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, then it doesn't matter.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs, ok := mod.Resources[hunt]
+ if !ok {
+ return nil, nil
+ }
+
+ // If the replacement key exists, we just keep both
+ if _, ok := mod.Resources[replace]; ok {
+ return nil, nil
+ }
+
+ mod.Resources[replace] = rs
+ delete(mod.Resources, hunt)
+
+ return nil, nil
+}
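The hunt/replace swap above is easy to misread, so a worked example with an illustrative resource name:

	hunt := "aws_instance.foo" // n.Resource.Id()
	replace := hunt + ".0"
	if count < 2 { // count 0 or 1: strip the index instead of adding it
		hunt, replace = replace, hunt
	}
	// count >= 2: renames "aws_instance.foo"   -> "aws_instance.foo.0"
	// count <  2: renames "aws_instance.foo.0" -> "aws_instance.foo"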
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
new file mode 100644
index 00000000..91e2b904
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
@@ -0,0 +1,78 @@
+package terraform
+
+import (
+ "log"
+)
+
+// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state
+// when a resource count crosses the zero/one boundary, i.e. renaming a
+// resource from "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
+//
+// This works on the global state.
+type EvalCountFixZeroOneBoundaryGlobal struct{}
+
+// TODO: test
+func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) {
+ // Get the state and lock it since we'll potentially modify it
+ state, lock := ctx.State()
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Prune the state since we require a clean state to work
+ state.prune()
+
+ // Go through each module since the boundaries are restricted to a
+ // module scope.
+ for _, m := range state.Modules {
+ if err := n.fixModule(m); err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(m *ModuleState) error {
+ // Counts keeps track of keys and their counts
+ counts := make(map[string]int)
+ for k := range m.Resources {
+ // Parse the key
+ key, err := ParseResourceStateKey(k)
+ if err != nil {
+ return err
+ }
+
+ // Set the index to -1 so that we can keep count
+ key.Index = -1
+
+ // Increment
+ counts[key.String()]++
+ }
+
+ // Go through the counts and do the fixup for each resource
+ for raw, count := range counts {
+ // Search and replace this resource
+ search := raw
+ replace := raw + ".0"
+ if count < 2 {
+ search, replace = replace, search
+ }
+ log.Printf("[TRACE] EvalCountFixZeroOneBoundaryGlobal: count %d, search %q, replace %q", count, search, replace)
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs, ok := m.Resources[search]
+ if !ok {
+ continue
+ }
+
+ // If the replacement key exists, we just keep both
+ if _, ok := m.Resources[replace]; ok {
+ continue
+ }
+
+ m.Resources[replace] = rs
+ delete(m.Resources, search)
+ }
+
+ return nil
+}
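A worked trace of fixModule, using hypothetical state keys:

	// m.Resources keys: "aws_instance.foo" and "aws_instance.foo.1"
	//
	// ParseResourceStateKey yields Index -1 and Index 1 respectively;
	// forcing Index to -1 makes both stringify as "aws_instance.foo",
	// so counts["aws_instance.foo"] == 2. Because count >= 2, the
	// unindexed entry is renamed:
	//   "aws_instance.foo" -> "aws_instance.foo.0"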
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
new file mode 100644
index 00000000..54a8333e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
@@ -0,0 +1,25 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalCountCheckComputed is an EvalNode that checks whether a resource
+// count is computed and errors if so; computed counts can arise across
+// a module boundary and are not yet supported.
+type EvalCountCheckComputed struct {
+ Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalCountCheckComputed) Eval(ctx EvalContext) (interface{}, error) {
+ if n.Resource.RawCount.Value() == unknownValue() {
+ return nil, fmt.Errorf(
+ "%s: value of 'count' cannot be computed",
+ n.Resource.Id())
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
new file mode 100644
index 00000000..6f09526a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
@@ -0,0 +1,478 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalCompareDiff is an EvalNode implementation that compares two diffs
+// and errors if the diffs are not equal.
+type EvalCompareDiff struct {
+ Info *InstanceInfo
+ One, Two **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {
+ one, two := *n.One, *n.Two
+
+ // If either are nil, let them be empty
+ if one == nil {
+ one = new(InstanceDiff)
+ one.init()
+ }
+ if two == nil {
+ two = new(InstanceDiff)
+ two.init()
+ }
+ oneId, _ := one.GetAttribute("id")
+ twoId, _ := two.GetAttribute("id")
+ one.DelAttribute("id")
+ two.DelAttribute("id")
+ defer func() {
+ if oneId != nil {
+ one.SetAttribute("id", oneId)
+ }
+ if twoId != nil {
+ two.SetAttribute("id", twoId)
+ }
+ }()
+
+ if same, reason := one.Same(two); !same {
+ log.Printf("[ERROR] %s: diffs didn't match", n.Info.Id)
+ log.Printf("[ERROR] %s: reason: %s", n.Info.Id, reason)
+ log.Printf("[ERROR] %s: diff one: %#v", n.Info.Id, one)
+ log.Printf("[ERROR] %s: diff two: %#v", n.Info.Id, two)
+ return nil, fmt.Errorf(
+ "%s: diffs didn't match during apply. This is a bug with "+
+ "Terraform and should be reported as a GitHub Issue.\n"+
+ "\n"+
+ "Please include the following information in your report:\n"+
+ "\n"+
+ " Terraform Version: %s\n"+
+ " Resource ID: %s\n"+
+ " Mismatch reason: %s\n"+
+ " Diff One (usually from plan): %#v\n"+
+ " Diff Two (usually from apply): %#v\n"+
+ "\n"+
+ "Also include as much context as you can about your config, state, "+
+ "and the steps you performed to trigger this error.\n",
+ n.Info.Id, Version, n.Info.Id, reason, one, two)
+ }
+
+ return nil, nil
+}
+
+// EvalDiff is an EvalNode implementation that computes the diff for
+// a resource.
+type EvalDiff struct {
+ Name string
+ Info *InstanceInfo
+ Config **ResourceConfig
+ Provider *ResourceProvider
+ Diff **InstanceDiff
+ State **InstanceState
+ OutputDiff **InstanceDiff
+ OutputState **InstanceState
+
+ // Resource is needed to fetch the ignore_changes list so we can
+ // filter user-requested ignored attributes from the diff.
+ Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
+ state := *n.State
+ config := *n.Config
+ provider := *n.Provider
+
+ // Call pre-diff hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreDiff(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // The state for the diff must never be nil
+ diffState := state
+ if diffState == nil {
+ diffState = new(InstanceState)
+ }
+ diffState.init()
+
+ // Diff!
+ diff, err := provider.Diff(n.Info, diffState, config)
+ if err != nil {
+ return nil, err
+ }
+ if diff == nil {
+ diff = new(InstanceDiff)
+ }
+
+ // Set DestroyDeposed if we have deposed instances
+ _, err = readInstanceFromState(ctx, n.Name, nil, func(rs *ResourceState) (*InstanceState, error) {
+ if len(rs.Deposed) > 0 {
+ diff.DestroyDeposed = true
+ }
+
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Preserve the DestroyTainted flag
+ if n.Diff != nil {
+ diff.SetTainted((*n.Diff).GetDestroyTainted())
+ }
+
+ // Require a destroy if there is an ID and it requires new.
+ if diff.RequiresNew() && state != nil && state.ID != "" {
+ diff.SetDestroy(true)
+ }
+
+ // If we're creating a new resource, compute its ID
+ if diff.RequiresNew() || state == nil || state.ID == "" {
+ var oldID string
+ if state != nil {
+ oldID = state.Attributes["id"]
+ }
+
+ // Add diff to compute new ID
+ diff.init()
+ diff.SetAttribute("id", &ResourceAttrDiff{
+ Old: oldID,
+ NewComputed: true,
+ RequiresNew: true,
+ Type: DiffAttrOutput,
+ })
+ }
+
+ // filter out ignored resources
+ if err := n.processIgnoreChanges(diff); err != nil {
+ return nil, err
+ }
+
+ // Call post-diff hook
+ err = ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostDiff(n.Info, diff)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Update our output
+ *n.OutputDiff = diff
+
+ // Update the state if we care
+ if n.OutputState != nil {
+ *n.OutputState = state
+
+ // Merge our state so that the state is updated with our plan
+ if !diff.Empty() && n.OutputState != nil {
+ *n.OutputState = state.MergeDiff(diff)
+ }
+ }
+
+ return nil, nil
+}
+
+func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
+ if diff == nil || n.Resource == nil || n.Resource.Id() == "" {
+ return nil
+ }
+ ignoreChanges := n.Resource.Lifecycle.IgnoreChanges
+
+ if len(ignoreChanges) == 0 {
+ return nil
+ }
+
+ // If we're just creating the resource, we shouldn't alter the
+ // Diff at all
+ if diff.ChangeType() == DiffCreate {
+ return nil
+ }
+
+ // If the resource has been tainted then we don't process ignore changes
+ // since we MUST recreate the entire resource.
+ if diff.GetDestroyTainted() {
+ return nil
+ }
+
+ attrs := diff.CopyAttributes()
+
+ // get the complete set of keys we want to ignore
+ ignorableAttrKeys := make(map[string]bool)
+ for _, ignoredKey := range ignoreChanges {
+ for k := range attrs {
+ if ignoredKey == "*" || strings.HasPrefix(k, ignoredKey) {
+ ignorableAttrKeys[k] = true
+ }
+ }
+ }
+
+ // If the resource was being destroyed, check to see if we can ignore the
+ // reason for it being destroyed.
+ if diff.GetDestroy() {
+ for k, v := range attrs {
+ if k == "id" {
+ // id will always be changed if we intended to replace this instance
+ continue
+ }
+ if v.Empty() || v.NewComputed {
+ continue
+ }
+
+ // If any RequiresNew attribute isn't ignored, we need to keep the diff
+ // as-is to be able to replace the resource.
+ if v.RequiresNew && !ignorableAttrKeys[k] {
+ return nil
+ }
+ }
+
+ // Now that we know that we aren't replacing the instance, we can filter
+ // out all the empty and computed attributes. There may be a bunch of
+ // extraneous attribute diffs for the other non-requires-new attributes
+ // going from "" -> "configval" or "" -> "<computed>".
+ // We must make sure any flatmapped containers are filtered (or not) as a
+ // whole.
+ containers := groupContainers(diff)
+ keep := map[string]bool{}
+ for _, v := range containers {
+ if v.keepDiff() {
+ // At least one key has changes, so list all the sibling keys
+ // to keep in the diff.
+ for k := range v {
+ keep[k] = true
+ }
+ }
+ }
+
+ for k, v := range attrs {
+ if (v.Empty() || v.NewComputed) && !keep[k] {
+ ignorableAttrKeys[k] = true
+ }
+ }
+ }
+
+ // Here we undo the two reactions to RequiresNew in EvalDiff - the "id"
+ // attribute diff and the Destroy boolean field
+ log.Printf("[DEBUG] Removing 'id' diff and setting Destroy to false " +
+ "because after ignore_changes, this diff no longer requires replacement")
+ diff.DelAttribute("id")
+ diff.SetDestroy(false)
+
+ // If we didn't hit any of our early exit conditions, we can filter the diff.
+ for k := range ignorableAttrKeys {
+ log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s",
+ n.Resource.Id(), k)
+ diff.DelAttribute(k)
+ }
+
+ return nil
+}
+
+// a group of key-*ResourceAttrDiff pairs from the same flatmapped container
+type flatAttrDiff map[string]*ResourceAttrDiff
+
+// we need to keep all keys if any of them have a diff
+func (f flatAttrDiff) keepDiff() bool {
+ for _, v := range f {
+ if !v.Empty() && !v.NewComputed {
+ return true
+ }
+ }
+ return false
+}
+
+// sets, lists and maps need to be compared for diff inclusion as a whole, so
+// group the flatmapped keys together for easier comparison.
+func groupContainers(d *InstanceDiff) map[string]flatAttrDiff {
+ isIndex := multiVal.MatchString
+ containers := map[string]flatAttrDiff{}
+ attrs := d.CopyAttributes()
+ // we need to loop once to find the index key
+ for k := range attrs {
+ if isIndex(k) {
+ // add the key, always including the final dot to fully qualify it
+ containers[k[:len(k)-1]] = flatAttrDiff{}
+ }
+ }
+
+ // loop again to find all the sub keys
+ for prefix, values := range containers {
+ for k, attrDiff := range attrs {
+ // we include the index value as well, since it could be part of the diff
+ if strings.HasPrefix(k, prefix) {
+ values[k] = attrDiff
+ }
+ }
+ }
+
+ return containers
+}
+
+// EvalDiffDestroy is an EvalNode implementation that returns a plain
+// destroy diff.
+type EvalDiffDestroy struct {
+ Info *InstanceInfo
+ State **InstanceState
+ Output **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
+ state := *n.State
+
+ // If there is no state or we don't have an ID, we're already destroyed
+ if state == nil || state.ID == "" {
+ return nil, nil
+ }
+
+ // Call pre-diff hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreDiff(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // The diff
+ diff := &InstanceDiff{Destroy: true}
+
+ // Call post-diff hook
+ err = ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostDiff(n.Info, diff)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Update our output
+ *n.Output = diff
+
+ return nil, nil
+}
+
+// EvalDiffDestroyModule is an EvalNode implementation that marks a
+// module as destroyed in the full diff.
+type EvalDiffDestroyModule struct {
+ Path []string
+}
+
+// TODO: test
+func (n *EvalDiffDestroyModule) Eval(ctx EvalContext) (interface{}, error) {
+ diff, lock := ctx.Diff()
+
+ // Acquire the lock so that we can do this safely concurrently
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Write the diff
+ modDiff := diff.ModuleByPath(n.Path)
+ if modDiff == nil {
+ modDiff = diff.AddModule(n.Path)
+ }
+ modDiff.Destroy = true
+
+ return nil, nil
+}
+
+// EvalFilterDiff is an EvalNode implementation that filters the diff
+// according to some filter.
+type EvalFilterDiff struct {
+ // Input and output
+ Diff **InstanceDiff
+ Output **InstanceDiff
+
+ // Destroy, if true, will only include a destroy diff if it is set.
+ Destroy bool
+}
+
+func (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) {
+ if *n.Diff == nil {
+ return nil, nil
+ }
+
+ input := *n.Diff
+ result := new(InstanceDiff)
+
+ if n.Destroy {
+ if input.GetDestroy() || input.RequiresNew() {
+ result.SetDestroy(true)
+ }
+ }
+
+ if n.Output != nil {
+ *n.Output = result
+ }
+
+ return nil, nil
+}
+
+// EvalReadDiff is an EvalNode implementation that reads a resource's
+// diff out of the full diff.
+type EvalReadDiff struct {
+ Name string
+ Diff **InstanceDiff
+}
+
+func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) {
+ diff, lock := ctx.Diff()
+
+ // Acquire the lock so that we can do this safely concurrently
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Write the diff
+ modDiff := diff.ModuleByPath(ctx.Path())
+ if modDiff == nil {
+ return nil, nil
+ }
+
+ *n.Diff = modDiff.Resources[n.Name]
+
+ return nil, nil
+}
+
+// EvalWriteDiff is an EvalNode implementation that writes the diff to
+// the full diff.
+type EvalWriteDiff struct {
+ Name string
+ Diff **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) {
+ diff, lock := ctx.Diff()
+
+ // The diff to write. If it's empty, we write nil instead.
+ var diffVal *InstanceDiff
+ if n.Diff != nil {
+ diffVal = *n.Diff
+ }
+ if diffVal.Empty() {
+ diffVal = nil
+ }
+
+ // Acquire the lock so that we can do this safely concurrently
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Write the diff
+ modDiff := diff.ModuleByPath(ctx.Path())
+ if modDiff == nil {
+ modDiff = diff.AddModule(ctx.Path())
+ }
+ if diffVal != nil {
+ modDiff.Resources[n.Name] = diffVal
+ } else {
+ delete(modDiff.Resources, n.Name)
+ }
+
+ return nil, nil
+}
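groupContainers leans on the flatmap convention in which lists and maps carry a count key (a ".#" or ".%" suffix matched by multiVal, which is defined elsewhere in this package). A worked example with hypothetical diff attributes:

	// Attributes: "tags.%", "tags.Name", "tags.Env"
	//
	// "tags.%" matches the index pattern, so a container is created
	// under the prefix "tags." (the count key minus its final
	// character). The prefix scan then groups "tags.%", "tags.Name"
	// and "tags.Env" into that container. If keepDiff sees a real
	// change on any one of them, every sibling key is kept, so the
	// whole map is filtered in or out of the diff together.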
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_error.go b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go
new file mode 100644
index 00000000..470f798b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go
@@ -0,0 +1,20 @@
+package terraform
+
+// EvalReturnError is an EvalNode implementation that returns an
+// error if it is present.
+//
+// This is useful for scenarios where an error has been captured by
+// another EvalNode (like EvalApply) for special EvalTree-based error
+// handling, and that handling has completed, so the error should be
+// returned normally.
+type EvalReturnError struct {
+ Error *error
+}
+
+func (n *EvalReturnError) Eval(ctx EvalContext) (interface{}, error) {
+ if n.Error == nil {
+ return nil, nil
+ }
+
+ return nil, *n.Error
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
new file mode 100644
index 00000000..711c625c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
@@ -0,0 +1,25 @@
+package terraform
+
+// EvalNodeFilterFunc is the callback used to replace a node with
+// another node. To not do the replacement, just return the input node.
+type EvalNodeFilterFunc func(EvalNode) EvalNode
+
+// EvalNodeFilterable is an interface that can be implemented by
+// EvalNodes to allow filtering of sub-elements. Note that this isn't
+// a common thing to implement and you probably don't need it.
+type EvalNodeFilterable interface {
+ EvalNode
+ Filter(EvalNodeFilterFunc)
+}
+
+// EvalFilter runs the filter on the given node and returns the
+// final filtered value. This should be called rather than checking
+// the EvalNode directly since this will properly handle EvalNodeFilterables.
+func EvalFilter(node EvalNode, fn EvalNodeFilterFunc) EvalNode {
+ if f, ok := node.(EvalNodeFilterable); ok {
+ f.Filter(fn)
+ return node
+ }
+
+ return fn(node)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
new file mode 100644
index 00000000..1a55f024
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
@@ -0,0 +1,49 @@
+package terraform
+
+// EvalNodeOpFilterable is an interface that EvalNodes can implement
+// to be filterable by the operation that is being run on Terraform.
+type EvalNodeOpFilterable interface {
+ IncludeInOp(walkOperation) bool
+}
+
+// EvalNodeFilterOp returns a filter function that replaces any node
+// opting out of the given operation with EvalNoop.
+func EvalNodeFilterOp(op walkOperation) EvalNodeFilterFunc {
+ return func(n EvalNode) EvalNode {
+ include := true
+ if of, ok := n.(EvalNodeOpFilterable); ok {
+ include = of.IncludeInOp(op)
+ }
+ if include {
+ return n
+ }
+
+ return EvalNoop{}
+ }
+}
+
+// EvalOpFilter is an EvalNode implementation that is a proxy to
+// another node but filters based on the operation.
+type EvalOpFilter struct {
+ // Ops is the list of operations to include this node in.
+ Ops []walkOperation
+
+ // Node is the node to execute
+ Node EvalNode
+}
+
+// TODO: test
+func (n *EvalOpFilter) Eval(ctx EvalContext) (interface{}, error) {
+ return EvalRaw(n.Node, ctx)
+}
+
+// EvalNodeOpFilterable impl.
+func (n *EvalOpFilter) IncludeInOp(op walkOperation) bool {
+ for _, v := range n.Ops {
+ if v == op {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_if.go b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
new file mode 100644
index 00000000..d6b46a1f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
@@ -0,0 +1,26 @@
+package terraform
+
+// EvalIf is an EvalNode that is a conditional.
+type EvalIf struct {
+ If func(EvalContext) (bool, error)
+ Then EvalNode
+ Else EvalNode
+}
+
+// TODO: test
+func (n *EvalIf) Eval(ctx EvalContext) (interface{}, error) {
+ yes, err := n.If(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if yes {
+ return EvalRaw(n.Then, ctx)
+ } else {
+ if n.Else != nil {
+ return EvalRaw(n.Else, ctx)
+ }
+ }
+
+ return nil, nil
+}
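A small composition sketch for EvalIf; the condition is illustrative, and EvalNoop is the no-op node added later in this diff:

	node := &EvalIf{
		If: func(ctx EvalContext) (bool, error) {
			// Run Then only when evaluating the root module.
			return len(ctx.Path()) == 1, nil
		},
		Then: EvalNoop{},
		Else: EvalNoop{}, // optional; a nil Else is handled above
	}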
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
new file mode 100644
index 00000000..62cc581f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
@@ -0,0 +1,76 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// EvalImportState is an EvalNode implementation that performs an
+// ImportState operation on a provider. This will return the imported
+// states but won't modify any actual state.
+type EvalImportState struct {
+ Provider *ResourceProvider
+ Info *InstanceInfo
+ Id string
+ Output *[]*InstanceState
+}
+
+// TODO: test
+func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) {
+ provider := *n.Provider
+
+ {
+ // Call pre-import hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreImportState(n.Info, n.Id)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Import!
+ state, err := provider.ImportState(n.Info, n.Id)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "import %s (id: %s): %s", n.Info.HumanId(), n.Id, err)
+ }
+
+ if n.Output != nil {
+ *n.Output = state
+ }
+
+ {
+ // Call post-import hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostImportState(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+// EvalImportStateVerify verifies the state after ImportState and
+// after the refresh to make sure it is non-nil and valid.
+type EvalImportStateVerify struct {
+ Info *InstanceInfo
+ Id string
+ State **InstanceState
+}
+
+// TODO: test
+func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) {
+ state := *n.State
+ if state.Empty() {
+ return nil, fmt.Errorf(
+ "import %s (id: %s): Terraform detected a resource with this ID doesn't\n"+
+ "exist. Please verify the ID is correct. You cannot import non-existent\n"+
+ "resources using Terraform import.",
+ n.Info.HumanId(),
+ n.Id)
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
new file mode 100644
index 00000000..6825ff59
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
@@ -0,0 +1,24 @@
+package terraform
+
+import "github.com/hashicorp/terraform/config"
+
+// EvalInterpolate is an EvalNode implementation that takes a raw
+// configuration and interpolates it.
+type EvalInterpolate struct {
+ Config *config.RawConfig
+ Resource *Resource
+ Output **ResourceConfig
+}
+
+func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) {
+ rc, err := ctx.Interpolate(n.Config, n.Resource)
+ if err != nil {
+ return nil, err
+ }
+
+ if n.Output != nil {
+ *n.Output = rc
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go
new file mode 100644
index 00000000..f4bc8225
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go
@@ -0,0 +1,8 @@
+package terraform
+
+// EvalNoop is an EvalNode that does nothing.
+type EvalNoop struct{}
+
+func (EvalNoop) Eval(EvalContext) (interface{}, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
new file mode 100644
index 00000000..cf61781e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
@@ -0,0 +1,119 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalDeleteOutput is an EvalNode implementation that deletes an output
+// from the state.
+type EvalDeleteOutput struct {
+ Name string
+}
+
+// TODO: test
+func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) {
+ state, lock := ctx.State()
+ if state == nil {
+ return nil, nil
+ }
+
+ // Get a write lock so we can access this instance
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, there is nothing to delete.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ delete(mod.Outputs, n.Name)
+
+ return nil, nil
+}
+
+// EvalWriteOutput is an EvalNode implementation that writes the output
+// for the given name to the current state.
+type EvalWriteOutput struct {
+ Name string
+ Sensitive bool
+ Value *config.RawConfig
+}
+
+// TODO: test
+func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
+ cfg, err := ctx.Interpolate(n.Value, nil)
+ if err != nil {
+ // Log error but continue anyway
+ log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err)
+ }
+
+ state, lock := ctx.State()
+ if state == nil {
+ return nil, fmt.Errorf("cannot write state to nil state")
+ }
+
+ // Get a write lock so we can access this instance
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, create it.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ mod = state.AddModule(ctx.Path())
+ }
+
+ // Get the value from the config
+ var valueRaw interface{} = config.UnknownVariableValue
+ if cfg != nil {
+ var ok bool
+ valueRaw, ok = cfg.Get("value")
+ if !ok {
+ valueRaw = ""
+ }
+ if cfg.IsComputed("value") {
+ valueRaw = config.UnknownVariableValue
+ }
+ }
+
+ switch valueTyped := valueRaw.(type) {
+ case string:
+ mod.Outputs[n.Name] = &OutputState{
+ Type: "string",
+ Sensitive: n.Sensitive,
+ Value: valueTyped,
+ }
+ case []interface{}:
+ mod.Outputs[n.Name] = &OutputState{
+ Type: "list",
+ Sensitive: n.Sensitive,
+ Value: valueTyped,
+ }
+ case map[string]interface{}:
+ mod.Outputs[n.Name] = &OutputState{
+ Type: "map",
+ Sensitive: n.Sensitive,
+ Value: valueTyped,
+ }
+ case []map[string]interface{}:
+ // an HCL map is multi-valued, so if this was read out of a config the
+ // map may still be in a slice.
+ if len(valueTyped) == 1 {
+ mod.Outputs[n.Name] = &OutputState{
+ Type: "map",
+ Sensitive: n.Sensitive,
+ Value: valueTyped[0],
+ }
+ break
+ }
+ return nil, fmt.Errorf("output %s type (%T) with %d values not valid for type map",
+ n.Name, valueTyped, len(valueTyped))
+ default:
+ return nil, fmt.Errorf("output %s is not a valid type (%T)\n", n.Name, valueTyped)
+ }
+
+ return nil, nil
+}
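A worked example for the []map[string]interface{} arm above, assuming a configuration output whose value is a literal map such as value = { web = "10.0.0.1" }:

	// HCL decodes the literal map as a one-element slice of maps:
	//
	//   valueTyped == []map[string]interface{}{{"web": "10.0.0.1"}}
	//
	// so the stored OutputState has Type "map" and
	// Value == map[string]interface{}{"web": "10.0.0.1"}.
	// A slice holding more than one map cannot be flattened and errors.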
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
new file mode 100644
index 00000000..092fd18d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
@@ -0,0 +1,164 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalSetProviderConfig sets the parent configuration for a provider
+// without configuring that provider, validating it, etc.
+type EvalSetProviderConfig struct {
+ Provider string
+ Config **ResourceConfig
+}
+
+func (n *EvalSetProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
+ return nil, ctx.SetProviderConfig(n.Provider, *n.Config)
+}
+
+// EvalBuildProviderConfig outputs a *ResourceConfig that is properly
+// merged with parents and inputs on top of what is configured in the file.
+type EvalBuildProviderConfig struct {
+ Provider string
+ Config **ResourceConfig
+ Output **ResourceConfig
+}
+
+func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
+ cfg := *n.Config
+
+ // If we have a configuration set, then merge that in
+ if input := ctx.ProviderInput(n.Provider); input != nil {
+ // "input" is a map of the subset of config values that were known
+ // during the input walk, set by EvalInputProvider. Note that
+ // in particular it does *not* include attributes that had
+ // computed values at input time; those appear *only* in
+ // "cfg" here.
+ rc, err := config.NewRawConfig(input)
+ if err != nil {
+ return nil, err
+ }
+
+ merged := cfg.raw.Merge(rc)
+ cfg = NewResourceConfig(merged)
+ }
+
+ // Get the parent configuration if there is one
+ if parent := ctx.ParentProviderConfig(n.Provider); parent != nil {
+ merged := cfg.raw.Merge(parent.raw)
+ cfg = NewResourceConfig(merged)
+ }
+
+ *n.Output = cfg
+ return nil, nil
+}
+
+// EvalConfigProvider is an EvalNode implementation that configures
+// a provider that is already initialized and retrieved.
+type EvalConfigProvider struct {
+ Provider string
+ Config **ResourceConfig
+}
+
+func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
+ return nil, ctx.ConfigureProvider(n.Provider, *n.Config)
+}
+
+// EvalInitProvider is an EvalNode implementation that initializes a provider
+// and returns nothing. The provider can be retrieved again with the
+// EvalGetProvider node.
+type EvalInitProvider struct {
+ Name string
+}
+
+func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) {
+ return ctx.InitProvider(n.Name)
+}
+
+// EvalCloseProvider is an EvalNode implementation that closes provider
+// connections that aren't needed anymore.
+type EvalCloseProvider struct {
+ Name string
+}
+
+func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) {
+ ctx.CloseProvider(n.Name)
+ return nil, nil
+}
+
+// EvalGetProvider is an EvalNode implementation that retrieves an already
+// initialized provider instance for the given name.
+type EvalGetProvider struct {
+ Name string
+ Output *ResourceProvider
+}
+
+func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) {
+ result := ctx.Provider(n.Name)
+ if result == nil {
+ return nil, fmt.Errorf("provider %s not initialized", n.Name)
+ }
+
+ if n.Output != nil {
+ *n.Output = result
+ }
+
+ return nil, nil
+}
+
+// EvalInputProvider is an EvalNode implementation that asks for input
+// for the given provider configurations.
+type EvalInputProvider struct {
+ Name string
+ Provider *ResourceProvider
+ Config **ResourceConfig
+}
+
+func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) {
+ // If we already have input for this provider, don't ask again
+ if v := ctx.ProviderInput(n.Name); v != nil {
+ return nil, nil
+ }
+
+ rc := *n.Config
+
+ // Wrap the input into a namespace
+ input := &PrefixUIInput{
+ IdPrefix: fmt.Sprintf("provider.%s", n.Name),
+ QueryPrefix: fmt.Sprintf("provider.%s.", n.Name),
+ UIInput: ctx.Input(),
+ }
+
+ // Go through each provider and capture the input necessary
+ // to satisfy it.
+ config, err := (*n.Provider).Input(input, rc)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error configuring %s: %s", n.Name, err)
+ }
+
+ // Set the input that we received so that child modules don't attempt
+ // to ask for input again.
+ if config != nil && len(config.Config) > 0 {
+ // This repository of provider input results on the context doesn't
+ // retain config.ComputedKeys, so we need to filter those out here
+ // in order that later users of this data won't try to use the unknown
+ // value placeholder as if it were a literal value. This map is just
+ // of known values we've been able to complete so far; dynamic stuff
+ // will be merged in by EvalBuildProviderConfig on subsequent
+ // (post-input) walks.
+ confMap := config.Config
+ if config.ComputedKeys != nil {
+ for _, key := range config.ComputedKeys {
+ delete(confMap, key)
+ }
+ }
+
+ ctx.SetProviderInput(n.Name, confMap)
+ } else {
+ ctx.SetProviderInput(n.Name, map[string]interface{}{})
+ }
+
+ return nil, nil
+}
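On merge order in EvalBuildProviderConfig: RawConfig.Merge gives the argument's values priority on conflicts, so the effective precedence, lowest to highest, works out to:

	// 1. values written in the provider block of the configuration (cfg)
	// 2. values captured during the input walk (merged over cfg)
	// 3. the parent module's provider config (merged over both)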
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
new file mode 100644
index 00000000..89579c05
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
@@ -0,0 +1,47 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner
+// and returns nothing. The provisioner can be retrieved again with the
+// EvalGetProvisioner node.
+type EvalInitProvisioner struct {
+ Name string
+}
+
+func (n *EvalInitProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+ return ctx.InitProvisioner(n.Name)
+}
+
+// EvalCloseProvisioner is an EvalNode implementation that closes provisioner
+// connections that aren't needed anymore.
+type EvalCloseProvisioner struct {
+ Name string
+}
+
+func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+ ctx.CloseProvisioner(n.Name)
+ return nil, nil
+}
+
+// EvalGetProvisioner is an EvalNode implementation that retrieves an already
+// initialized provisioner instance for the given name.
+type EvalGetProvisioner struct {
+ Name string
+ Output *ResourceProvisioner
+}
+
+func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+ result := ctx.Provisioner(n.Name)
+ if result == nil {
+ return nil, fmt.Errorf("provisioner %s not initialized", n.Name)
+ }
+
+ if n.Output != nil {
+ *n.Output = result
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
new file mode 100644
index 00000000..fb85a284
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
@@ -0,0 +1,139 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// EvalReadDataDiff is an EvalNode implementation that executes a data
+// resource's ReadDataDiff method to discover what attributes it exports.
+type EvalReadDataDiff struct {
+ Provider *ResourceProvider
+ Output **InstanceDiff
+ OutputState **InstanceState
+ Config **ResourceConfig
+ Info *InstanceInfo
+
+ // Set Previous when re-evaluating diff during apply, to ensure that
+ // the "Destroy" flag is preserved.
+ Previous **InstanceDiff
+}
+
+func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) {
+ // TODO: test
+
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreDiff(n.Info, nil)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ var diff *InstanceDiff
+
+ if n.Previous != nil && *n.Previous != nil && (*n.Previous).GetDestroy() {
+ // If we're re-diffing for a diff that was already planning to
+ // destroy, then we'll just continue with that plan.
+ diff = &InstanceDiff{Destroy: true}
+ } else {
+ provider := *n.Provider
+ config := *n.Config
+
+ var err error
+ diff, err = provider.ReadDataDiff(n.Info, config)
+ if err != nil {
+ return nil, err
+ }
+ if diff == nil {
+ diff = new(InstanceDiff)
+ }
+
+ // if id isn't explicitly set then it's always computed, because we're
+ // always "creating a new resource".
+ diff.init()
+ if _, ok := diff.Attributes["id"]; !ok {
+ diff.SetAttribute("id", &ResourceAttrDiff{
+ Old: "",
+ NewComputed: true,
+ RequiresNew: true,
+ Type: DiffAttrOutput,
+ })
+ }
+ }
+
+ err = ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostDiff(n.Info, diff)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ *n.Output = diff
+
+ if n.OutputState != nil {
+ state := &InstanceState{}
+ *n.OutputState = state
+
+ // Apply the diff to the returned state, so the state includes
+ // any attribute values that are not computed.
+ if !diff.Empty() && n.OutputState != nil {
+ *n.OutputState = state.MergeDiff(diff)
+ }
+ }
+
+ return nil, nil
+}
+
+// EvalReadDataApply is an EvalNode implementation that executes a data
+// resource's ReadDataApply method to read data from the data source.
+type EvalReadDataApply struct {
+ Provider *ResourceProvider
+ Output **InstanceState
+ Diff **InstanceDiff
+ Info *InstanceInfo
+}
+
+func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) {
+ // TODO: test
+ provider := *n.Provider
+ diff := *n.Diff
+
+ // If the diff is for *destroying* this resource then we'll
+ // just drop its state and move on, since data resources don't
+ // support an actual "destroy" action.
+ if diff != nil && diff.GetDestroy() {
+ if n.Output != nil {
+ *n.Output = nil
+ }
+ return nil, nil
+ }
+
+ // For the purpose of external hooks we present a data apply as a
+ // "Refresh" rather than an "Apply" because creating a data source
+ // is presented to users/callers as a "read" operation.
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ // We don't have a state yet, so we'll just give the hook an
+ // empty one to work with.
+ return h.PreRefresh(n.Info, &InstanceState{})
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ state, err := provider.ReadDataApply(n.Info, diff)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %s", n.Info.Id, err)
+ }
+
+ err = ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostRefresh(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if n.Output != nil {
+ *n.Output = state
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
new file mode 100644
index 00000000..fa2b8126
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
@@ -0,0 +1,55 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+)
+
+// EvalRefresh is an EvalNode implementation that does a refresh for
+// a resource.
+type EvalRefresh struct {
+ Provider *ResourceProvider
+ State **InstanceState
+ Info *InstanceInfo
+ Output **InstanceState
+}
+
+// TODO: test
+func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) {
+ provider := *n.Provider
+ state := *n.State
+
+ // If we have no state, we don't do any refreshing
+ if state == nil {
+ log.Printf("[DEBUG] refresh: %s: no state, not refreshing", n.Info.Id)
+ return nil, nil
+ }
+
+ // Call pre-refresh hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PreRefresh(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Refresh!
+ state, err = provider.Refresh(n.Info, state)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %s", n.Info.Id, err.Error())
+ }
+
+ // Call post-refresh hook
+ err = ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostRefresh(n.Info, state)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if n.Output != nil {
+ *n.Output = state
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
new file mode 100644
index 00000000..5eca6782
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
@@ -0,0 +1,13 @@
+package terraform
+
+// EvalInstanceInfo is an EvalNode implementation that fills in the
+// InstanceInfo as much as it can.
+type EvalInstanceInfo struct {
+ Info *InstanceInfo
+}
+
+// TODO: test
+func (n *EvalInstanceInfo) Eval(ctx EvalContext) (interface{}, error) {
+ n.Info.ModulePath = ctx.Path()
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
new file mode 100644
index 00000000..82d81782
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
@@ -0,0 +1,27 @@
+package terraform
+
+// EvalSequence is an EvalNode that evaluates in sequence.
+type EvalSequence struct {
+ Nodes []EvalNode
+}
+
+func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) {
+ for _, n := range n.Nodes {
+ if n == nil {
+ continue
+ }
+
+ if _, err := EvalRaw(n, ctx); err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+// EvalNodeFilterable impl.
+func (n *EvalSequence) Filter(fn EvalNodeFilterFunc) {
+ for i, node := range n.Nodes {
+ n.Nodes[i] = fn(node)
+ }
+}
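EvalSequence's Filter is what makes the op-filtering in eval_filter_operation.go effective. A sketch, assuming the walkOperation values (walkPlan, walkApply) defined elsewhere in this package:

	seq := &EvalSequence{
		Nodes: []EvalNode{
			// This node only participates in the plan walk.
			&EvalOpFilter{Ops: []walkOperation{walkPlan}, Node: EvalNoop{}},
		},
	}

	// On an apply walk, the filter swaps the wrapped node for EvalNoop{}.
	// EvalFilter mutates seq in place because *EvalSequence implements
	// EvalNodeFilterable.
	EvalFilter(seq, EvalNodeFilterOp(walkApply))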
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
new file mode 100644
index 00000000..126a0e63
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
@@ -0,0 +1,324 @@
+package terraform
+
+import "fmt"
+
+// EvalReadState is an EvalNode implementation that reads the
+// primary InstanceState for a specific resource out of the state.
+type EvalReadState struct {
+ Name string
+ Output **InstanceState
+}
+
+func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) {
+ return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) {
+ return rs.Primary, nil
+ })
+}
+
+// EvalReadStateDeposed is an EvalNode implementation that reads the
+// deposed InstanceState for a specific resource out of the state.
+type EvalReadStateDeposed struct {
+ Name string
+ Output **InstanceState
+ // Index indicates which instance in the Deposed list to target, or -1 for
+ // the last item.
+ Index int
+}
+
+func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
+ return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) {
+ // Get the index. If it is negative, then we get the last one
+ idx := n.Index
+ if idx < 0 {
+ idx = len(rs.Deposed) - 1
+ }
+ if idx >= 0 && idx < len(rs.Deposed) {
+ return rs.Deposed[idx], nil
+ } else {
+ return nil, fmt.Errorf("bad deposed index: %d, for resource: %#v", idx, rs)
+ }
+ })
+}
+
+// Does the bulk of the work for the various flavors of ReadState eval nodes.
+// Each node just provides a reader function to get from the ResourceState to the
+// InstanceState, and this takes care of all the plumbing.
+func readInstanceFromState(
+ ctx EvalContext,
+ resourceName string,
+ output **InstanceState,
+ readerFn func(*ResourceState) (*InstanceState, error),
+) (*InstanceState, error) {
+ state, lock := ctx.State()
+
+ // Get a read lock so we can access this instance
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Look for the module state. If we don't have one, then it doesn't matter.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs := mod.Resources[resourceName]
+ if rs == nil {
+ return nil, nil
+ }
+
+ // Use the delegate function to get the instance state from the resource state
+ is, err := readerFn(rs)
+ if err != nil {
+ return nil, err
+ }
+
+ // Write the result to the output pointer
+ if output != nil {
+ *output = is
+ }
+
+ return is, nil
+}
+
+// EvalRequireState is an EvalNode implementation that early exits
+// if the state doesn't have an ID.
+type EvalRequireState struct {
+ State **InstanceState
+}
+
+func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) {
+ if n.State == nil {
+ return nil, EvalEarlyExitError{}
+ }
+
+ state := *n.State
+ if state == nil || state.ID == "" {
+ return nil, EvalEarlyExitError{}
+ }
+
+ return nil, nil
+}
+
+// EvalUpdateStateHook is an EvalNode implementation that calls the
+// PostStateUpdate hook with the current state.
+type EvalUpdateStateHook struct{}
+
+func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) {
+ state, lock := ctx.State()
+
+ // Get a full lock. Even calling something like WriteState can modify
+ // (prune) the state, so we need the full lock.
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Call the hook
+ err := ctx.Hook(func(h Hook) (HookAction, error) {
+ return h.PostStateUpdate(state)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+// EvalWriteState is an EvalNode implementation that writes the
+// primary InstanceState for a specific resource into the state.
+type EvalWriteState struct {
+ Name string
+ ResourceType string
+ Provider string
+ Dependencies []string
+ State **InstanceState
+}
+
+func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) {
+ return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies,
+ func(rs *ResourceState) error {
+ rs.Primary = *n.State
+ return nil
+ },
+ )
+}
+
+// EvalWriteStateDeposed is an EvalNode implementation that writes
+// an InstanceState out to the Deposed list of a resource in the state.
+type EvalWriteStateDeposed struct {
+ Name string
+ ResourceType string
+ Provider string
+ Dependencies []string
+ State **InstanceState
+ // Index indicates which instance in the Deposed list to target, or -1 to append.
+ Index int
+}
+
+func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
+ return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies,
+ func(rs *ResourceState) error {
+ if n.Index == -1 {
+ rs.Deposed = append(rs.Deposed, *n.State)
+ } else {
+ rs.Deposed[n.Index] = *n.State
+ }
+ return nil
+ },
+ )
+}
+
+// Pulls together the common tasks of the EvalWriteState nodes. All the args
+// are passed directly down from the EvalNode along with a `writer` function
+// which receives the *ResourceState and is responsible for writing an
+// InstanceState to the proper field in the ResourceState.
+func writeInstanceToState(
+ ctx EvalContext,
+ resourceName string,
+ resourceType string,
+ provider string,
+ dependencies []string,
+ writerFn func(*ResourceState) error,
+) (*InstanceState, error) {
+ state, lock := ctx.State()
+ if state == nil {
+ return nil, fmt.Errorf("cannot write state to nil state")
+ }
+
+ // Get a write lock so we can access this instance
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, create it.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ mod = state.AddModule(ctx.Path())
+ }
+
+ // Look for the resource state.
+ rs := mod.Resources[resourceName]
+ if rs == nil {
+ rs = &ResourceState{}
+ rs.init()
+ mod.Resources[resourceName] = rs
+ }
+ rs.Type = resourceType
+ rs.Dependencies = dependencies
+ rs.Provider = provider
+
+ if err := writerFn(rs); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+// EvalClearPrimaryState is an EvalNode implementation that clears the primary
+// instance from a resource state.
+type EvalClearPrimaryState struct {
+ Name string
+}
+
+func (n *EvalClearPrimaryState) Eval(ctx EvalContext) (interface{}, error) {
+ state, lock := ctx.State()
+
+ // Get a write lock since we mutate the resource state below
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, then it doesn't matter.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs := mod.Resources[n.Name]
+ if rs == nil {
+ return nil, nil
+ }
+
+ // Clear primary from the resource state
+ rs.Primary = nil
+
+ return nil, nil
+}
+
+// EvalDeposeState is an EvalNode implementation that takes the primary
+// out of a state and makes it Deposed. This is done at the beginning of
+// create-before-destroy calls so that the create can create while preserving
+// the old state of the to-be-destroyed resource.
+type EvalDeposeState struct {
+ Name string
+}
+
+// TODO: test
+func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) {
+ state, lock := ctx.State()
+
+ // Get a write lock since we mutate the resource state below
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, then it doesn't matter.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs := mod.Resources[n.Name]
+ if rs == nil {
+ return nil, nil
+ }
+
+ // If we don't have a primary, we have nothing to depose
+ if rs.Primary == nil {
+ return nil, nil
+ }
+
+ // Depose
+ rs.Deposed = append(rs.Deposed, rs.Primary)
+ rs.Primary = nil
+
+ return nil, nil
+}
+
+// EvalUndeposeState is an EvalNode implementation that restores the
+// most recently deposed InstanceState as the primary for a resource.
+type EvalUndeposeState struct {
+ Name string
+ State **InstanceState
+}
+
+// TODO: test
+func (n *EvalUndeposeState) Eval(ctx EvalContext) (interface{}, error) {
+ state, lock := ctx.State()
+
+ // Get a write lock since we mutate the resource state below
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Look for the module state. If we don't have one, then it doesn't matter.
+ mod := state.ModuleByPath(ctx.Path())
+ if mod == nil {
+ return nil, nil
+ }
+
+ // Look for the resource state. If we don't have one, then it is okay.
+ rs := mod.Resources[n.Name]
+ if rs == nil {
+ return nil, nil
+ }
+
+ // If we don't have any deposed resources, then we have nothing to do
+ if len(rs.Deposed) == 0 {
+ return nil, nil
+ }
+
+ // Undepose
+ idx := len(rs.Deposed) - 1
+ rs.Primary = rs.Deposed[idx]
+ rs.Deposed[idx] = *n.State
+
+ return nil, nil
+}
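The reader/writer delegates above keep the state plumbing in one place; each Eval node contributes only the closure. A hypothetical variant that inspects the deposed list (mirroring the use in eval_diff.go earlier in this diff) could be built the same way:

	if _, err := readInstanceFromState(ctx, "aws_instance.foo", nil,
		func(rs *ResourceState) (*InstanceState, error) {
			if len(rs.Deposed) > 0 {
				// react to deposed instances here
			}
			return nil, nil
		}); err != nil {
		return nil, err
	}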
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
new file mode 100644
index 00000000..478aa640
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
@@ -0,0 +1,227 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/mitchellh/mapstructure"
+)
+
+// EvalValidateError is the error structure returned if there were
+// validation errors.
+type EvalValidateError struct {
+ Warnings []string
+ Errors []error
+}
+
+func (e *EvalValidateError) Error() string {
+ return fmt.Sprintf("Warnings: %s. Errors: %s", e.Warnings, e.Errors)
+}
+
+// EvalValidateCount is an EvalNode implementation that validates
+// the count of a resource.
+type EvalValidateCount struct {
+ Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) {
+ var count int
+ var errs []error
+ var err error
+ if _, err := ctx.Interpolate(n.Resource.RawCount, nil); err != nil {
+ errs = append(errs, fmt.Errorf(
+ "Failed to interpolate count: %s", err))
+ goto RETURN
+ }
+
+ count, err = n.Resource.Count()
+ if err != nil {
+ // If we can't get the count during validation, then
+ // just replace it with the number 1.
+ c := n.Resource.RawCount.Config()
+ c[n.Resource.RawCount.Key] = "1"
+ count = 1
+ }
+ err = nil
+
+ if count < 0 {
+ errs = append(errs, fmt.Errorf(
+ "Count is less than zero: %d", count))
+ }
+
+RETURN:
+ if len(errs) != 0 {
+ err = &EvalValidateError{
+ Errors: errs,
+ }
+ }
+ return nil, err
+}
+
+// EvalValidateProvider is an EvalNode implementation that validates
+// the configuration of a provider.
+type EvalValidateProvider struct {
+ Provider *ResourceProvider
+ Config **ResourceConfig
+}
+
+func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) {
+ provider := *n.Provider
+ config := *n.Config
+
+ warns, errs := provider.Validate(config)
+ if len(warns) == 0 && len(errs) == 0 {
+ return nil, nil
+ }
+
+ return nil, &EvalValidateError{
+ Warnings: warns,
+ Errors: errs,
+ }
+}
+
+// EvalValidateProvisioner is an EvalNode implementation that validates
+// the configuration of a provisioner, including its connection configuration.
+type EvalValidateProvisioner struct {
+ Provisioner *ResourceProvisioner
+ Config **ResourceConfig
+ ConnConfig **ResourceConfig
+}
+
+func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+ provisioner := *n.Provisioner
+ config := *n.Config
+ var warns []string
+ var errs []error
+
+ {
+ // Validate the provisioner's own config first
+ w, e := provisioner.Validate(config)
+ warns = append(warns, w...)
+ errs = append(errs, e...)
+ }
+
+ {
+ // Now validate the connection config, which might either be from
+ // the provisioner block itself or inherited from the resource's
+ // shared connection info.
+ w, e := n.validateConnConfig(*n.ConnConfig)
+ warns = append(warns, w...)
+ errs = append(errs, e...)
+ }
+
+ if len(warns) == 0 && len(errs) == 0 {
+ return nil, nil
+ }
+
+ return nil, &EvalValidateError{
+ Warnings: warns,
+ Errors: errs,
+ }
+}
+
+func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) (warns []string, errs []error) {
+ // We can't comprehensively validate the connection config since its
+ // final structure is decided by the communicator and we can't instantiate
+ // that until we have a complete instance state. However, we *can* catch
+ // configuration keys that are not valid for *any* communicator, catching
+ // typos early rather than waiting until we actually try to run one of
+ // the resource's provisioners.
+
+ type connConfigSuperset struct {
+ // All attribute types are interface{} here because at this point we
+ // may still have unresolved interpolation expressions, which will
+ // appear as strings regardless of the final goal type.
+
+ Type interface{} `mapstructure:"type"`
+ User interface{} `mapstructure:"user"`
+ Password interface{} `mapstructure:"password"`
+ Host interface{} `mapstructure:"host"`
+ Port interface{} `mapstructure:"port"`
+ Timeout interface{} `mapstructure:"timeout"`
+ ScriptPath interface{} `mapstructure:"script_path"`
+
+ // For type=ssh only (enforced in ssh communicator)
+ PrivateKey interface{} `mapstructure:"private_key"`
+ Agent interface{} `mapstructure:"agent"`
+ BastionHost interface{} `mapstructure:"bastion_host"`
+ BastionPort interface{} `mapstructure:"bastion_port"`
+ BastionUser interface{} `mapstructure:"bastion_user"`
+ BastionPassword interface{} `mapstructure:"bastion_password"`
+ BastionPrivateKey interface{} `mapstructure:"bastion_private_key"`
+
+ // For type=winrm only (enforced in winrm communicator)
+ HTTPS interface{} `mapstructure:"https"`
+ Insecure interface{} `mapstructure:"insecure"`
+ CACert interface{} `mapstructure:"cacert"`
+ }
+
+ var metadata mapstructure.Metadata
+ decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
+ Metadata: &metadata,
+ Result: &connConfigSuperset{}, // result is disregarded; we only care about unused keys
+ })
+ if err != nil {
+ // should never happen
+ errs = append(errs, err)
+ return
+ }
+
+ if err := decoder.Decode(connConfig.Config); err != nil {
+ errs = append(errs, err)
+ return
+ }
+
+ for _, attrName := range metadata.Unused {
+ errs = append(errs, fmt.Errorf("unknown 'connection' argument %q", attrName))
+ }
+ return
+}
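+
+// As a sketch of what the check above catches: a hypothetical block such as
+//
+//	connection {
+//		type = "ssh"
+//		usr  = "admin" // typo: should be "user"
+//	}
+//
+// decodes with "usr" left in metadata.Unused, so validation fails early with
+// `unknown 'connection' argument "usr"` instead of surfacing only when a
+// provisioner actually runs.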
+
+// EvalValidateResource is an EvalNode implementation that validates
+// the configuration of a resource.
+type EvalValidateResource struct {
+ Provider *ResourceProvider
+ Config **ResourceConfig
+ ResourceName string
+ ResourceType string
+ ResourceMode config.ResourceMode
+
+ // IgnoreWarnings means that warnings will not be passed through. This allows
+ // "just-in-time" passes of validation to continue execution through warnings.
+ IgnoreWarnings bool
+}
+
+func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) {
+ provider := *n.Provider
+ cfg := *n.Config
+ var warns []string
+ var errs []error
+ // Provider entry point varies depending on resource mode, because
+ // managed resources and data resources are two distinct concepts
+ // in the provider abstraction.
+ switch n.ResourceMode {
+ case config.ManagedResourceMode:
+ warns, errs = provider.ValidateResource(n.ResourceType, cfg)
+ case config.DataResourceMode:
+ warns, errs = provider.ValidateDataSource(n.ResourceType, cfg)
+ }
+
+ // If the resource name doesn't match the name regular
+ // expression, show an error.
+ if !config.NameRegexp.Match([]byte(n.ResourceName)) {
+ errs = append(errs, fmt.Errorf(
+ "%s: resource name can only contain letters, numbers, "+
+ "dashes, and underscores.", n.ResourceName))
+ }
+
+ if (len(warns) == 0 || n.IgnoreWarnings) && len(errs) == 0 {
+ return nil, nil
+ }
+
+ return nil, &EvalValidateError{
+ Warnings: warns,
+ Errors: errs,
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
new file mode 100644
index 00000000..ae4436a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
@@ -0,0 +1,74 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// EvalValidateResourceSelfRef is an EvalNode implementation that validates that
+// a configuration doesn't contain a reference to the resource itself.
+//
+// This must be done prior to interpolating configuration in order to avoid
+// any infinite loop scenarios.
+type EvalValidateResourceSelfRef struct {
+ Addr **ResourceAddress
+ Config **config.RawConfig
+}
+
+func (n *EvalValidateResourceSelfRef) Eval(ctx EvalContext) (interface{}, error) {
+ addr := *n.Addr
+ conf := *n.Config
+
+ // Go through the variables and find self references
+ var errs []error
+ for k, raw := range conf.Variables {
+ rv, ok := raw.(*config.ResourceVariable)
+ if !ok {
+ continue
+ }
+
+ // Build an address from the variable
+ varAddr := &ResourceAddress{
+ Path: addr.Path,
+ Mode: rv.Mode,
+ Type: rv.Type,
+ Name: rv.Name,
+ Index: rv.Index,
+ InstanceType: TypePrimary,
+ }
+
+ // If the variable access is a multi-access (*), then we just
+ // match the index so that we'll match our own addr if everything
+ // else matches.
+ if rv.Multi && rv.Index == -1 {
+ varAddr.Index = addr.Index
+ }
+
+ // ResourceAddress uses index "-1" when no index is set at all, which
+ // is equivalent to index "0" for resource access. If we're in that
+ // scenario, set varAddr's index to -1 as well so the comparison
+ // matches.
+ if addr.Index == -1 && varAddr.Index == 0 {
+ varAddr.Index = -1
+ }
+
+ // If the addresses match, then this is a self reference
+ if varAddr.Equals(addr) && varAddr.Index == addr.Index {
+ errs = append(errs, fmt.Errorf(
+ "%s: self reference not allowed: %q",
+ addr, k))
+ }
+ }
+
+ // If no errors, no errors!
+ if len(errs) == 0 {
+ return nil, nil
+ }
+
+ // Wrap the errors in the proper wrapper so we can handle validation
+ // formatting properly upstream.
+ return nil, &EvalValidateError{
+ Errors: errs,
+ }
+}
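+
+// For example, a configuration like the following (hypothetical names):
+//
+//	resource "aws_instance" "web" {
+//		ami = "${aws_instance.web.id}"
+//	}
+//
+// produces a varAddr equal to the resource's own address, so validation
+// fails here with "self reference not allowed" rather than entering an
+// infinite loop during interpolation.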
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
new file mode 100644
index 00000000..e39a33c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
@@ -0,0 +1,279 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/helper/hilmapstructure"
+)
+
+// EvalTypeCheckVariable is an EvalNode which ensures that the variable
+// values which are assigned as inputs to a module (including the root)
+// match the types which are either declared for the variables explicitly
+// or inferred from the default values.
+//
+// In order to achieve this, three things are required:
+// - a map of the proposed variable values
+// - the configuration tree of the module in which the variable is
+// declared
+// - the path to the module (so we know which part of the tree to
+// compare the values against).
+type EvalTypeCheckVariable struct {
+ Variables map[string]interface{}
+ ModulePath []string
+ ModuleTree *module.Tree
+}
+
+func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) {
+ currentTree := n.ModuleTree
+ for _, pathComponent := range n.ModulePath[1:] {
+ currentTree = currentTree.Children()[pathComponent]
+ }
+ targetConfig := currentTree.Config()
+
+ prototypes := make(map[string]config.VariableType)
+ for _, variable := range targetConfig.Variables {
+ prototypes[variable.Name] = variable.Type()
+ }
+
+ // Only display a module in an error message if we are not in the root module
+ modulePathDescription := fmt.Sprintf(" in module %s", strings.Join(n.ModulePath[1:], "."))
+ if len(n.ModulePath) == 1 {
+ modulePathDescription = ""
+ }
+
+ for name, declaredType := range prototypes {
+ proposedValue, ok := n.Variables[name]
+ if !ok {
+ // This means the default value should be used as no overriding value
+ // has been set. Therefore we should continue as no check is necessary.
+ continue
+ }
+
+ if proposedValue == config.UnknownVariableValue {
+ continue
+ }
+
+ switch declaredType {
+ case config.VariableTypeString:
+ switch proposedValue.(type) {
+ case string:
+ continue
+ default:
+ return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
+ name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
+ }
+ case config.VariableTypeMap:
+ switch proposedValue.(type) {
+ case map[string]interface{}:
+ continue
+ default:
+ return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
+ name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
+ }
+ case config.VariableTypeList:
+ switch proposedValue.(type) {
+ case []interface{}:
+ continue
+ default:
+ return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
+ name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
+ }
+ default:
+ return nil, fmt.Errorf("variable %s%s should be type %s, got type string",
+ name, modulePathDescription, declaredType.Printable())
+ }
+ }
+
+ return nil, nil
+}
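+
+// As an illustration (assuming a root-module variable "tags" declared with
+// type = "map"): if the proposed value arrives as a plain string, the
+// VariableTypeMap case above rejects it with an error of the form:
+//
+//	variable tags should be type map, got string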
+
+// EvalSetVariables is an EvalNode implementation that sets the variables
+// explicitly for interpolation later.
+type EvalSetVariables struct {
+ Module *string
+ Variables map[string]interface{}
+}
+
+// TODO: test
+func (n *EvalSetVariables) Eval(ctx EvalContext) (interface{}, error) {
+ ctx.SetVariables(*n.Module, n.Variables)
+ return nil, nil
+}
+
+// EvalVariableBlock is an EvalNode implementation that evaluates the
+// given configuration, and uses the final values as a way to set the
+// mapping.
+type EvalVariableBlock struct {
+ Config **ResourceConfig
+ VariableValues map[string]interface{}
+}
+
+func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) {
+ // Clear out the existing mapping
+ for k := range n.VariableValues {
+ delete(n.VariableValues, k)
+ }
+
+ // Get our configuration
+ rc := *n.Config
+ for k, v := range rc.Config {
+ vKind := reflect.ValueOf(v).Type().Kind()
+
+ switch vKind {
+ case reflect.Slice:
+ var vSlice []interface{}
+ if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil {
+ n.VariableValues[k] = vSlice
+ continue
+ }
+ case reflect.Map:
+ var vMap map[string]interface{}
+ if err := hilmapstructure.WeakDecode(v, &vMap); err == nil {
+ n.VariableValues[k] = vMap
+ continue
+ }
+ default:
+ var vString string
+ if err := hilmapstructure.WeakDecode(v, &vString); err == nil {
+ n.VariableValues[k] = vString
+ continue
+ }
+ }
+
+ return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k)
+ }
+
+ for _, path := range rc.ComputedKeys {
+ log.Printf("[DEBUG] Setting Unknown Variable Value for computed key: %s", path)
+ err := n.setUnknownVariableValueForPath(path)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
+func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error {
+ pathComponents := strings.Split(path, ".")
+
+ if len(pathComponents) < 1 {
+ return fmt.Errorf("No path comoponents in %s", path)
+ }
+
+ if len(pathComponents) == 1 {
+ // Special case the "top level" since we know the type
+ if _, ok := n.VariableValues[pathComponents[0]]; !ok {
+ n.VariableValues[pathComponents[0]] = config.UnknownVariableValue
+ }
+ return nil
+ }
+
+ // Otherwise find the correct point in the tree and then set to unknown
+ var current interface{} = n.VariableValues[pathComponents[0]]
+ for i := 1; i < len(pathComponents); i++ {
+ switch tCurrent := current.(type) {
+ case []interface{}:
+ index, err := strconv.Atoi(pathComponents[i])
+ if err != nil {
+ return fmt.Errorf("Cannot convert %s to slice index in path %s",
+ pathComponents[i], path)
+ }
+ current = tCurrent[index]
+ case []map[string]interface{}:
+ index, err := strconv.Atoi(pathComponents[i])
+ if err != nil {
+ return fmt.Errorf("Cannot convert %s to slice index in path %s",
+ pathComponents[i], path)
+ }
+ current = tCurrent[index]
+ case map[string]interface{}:
+ if val, hasVal := tCurrent[pathComponents[i]]; hasVal {
+ current = val
+ continue
+ }
+
+ tCurrent[pathComponents[i]] = config.UnknownVariableValue
+ break
+ }
+ }
+
+ return nil
+}
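+
+// A sketch of the traversal above: a computed key such as "network.0.address"
+// splits into ["network", "0", "address"]; the walk indexes the slice stored
+// at n.VariableValues["network"] with 0, then marks "address" in the
+// resulting map as config.UnknownVariableValue if it isn't already set.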
+
+// EvalCoerceMapVariable is an EvalNode implementation that recognizes a
+// specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a
+// bare map literal is indistinguishable from a list of maps with one element.
+//
+// We take all the same inputs as EvalTypeCheckVariable above, since we need
+// both the target type and the proposed value in order to properly coerce.
+type EvalCoerceMapVariable struct {
+ Variables map[string]interface{}
+ ModulePath []string
+ ModuleTree *module.Tree
+}
+
+// Eval implements the EvalNode interface. See EvalCoerceMapVariable for
+// details.
+func (n *EvalCoerceMapVariable) Eval(ctx EvalContext) (interface{}, error) {
+ currentTree := n.ModuleTree
+ for _, pathComponent := range n.ModulePath[1:] {
+ currentTree = currentTree.Children()[pathComponent]
+ }
+ targetConfig := currentTree.Config()
+
+ prototypes := make(map[string]config.VariableType)
+ for _, variable := range targetConfig.Variables {
+ prototypes[variable.Name] = variable.Type()
+ }
+
+ for name, declaredType := range prototypes {
+ if declaredType != config.VariableTypeMap {
+ continue
+ }
+
+ proposedValue, ok := n.Variables[name]
+ if !ok {
+ continue
+ }
+
+ if list, ok := proposedValue.([]interface{}); ok && len(list) == 1 {
+ if m, ok := list[0].(map[string]interface{}); ok {
+ log.Printf("[DEBUG] EvalCoerceMapVariable: "+
+ "Coercing single element list into map: %#v", m)
+ n.Variables[name] = m
+ }
+ }
+ }
+
+ return nil, nil
+}
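+
+// The ambiguity in question: in HCL, a block-style assignment like
+//
+//	default {
+//		env = "prod"
+//	}
+//
+// parses as a one-element list of maps, which is indistinguishable from a
+// bare map literal. When the declared type is "map", the code above unwraps
+// that single-element list back into the map itself.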
+
+// hclTypeName returns the name of the type that would represent this value in
+// a config file, or falls back to the Go type name if there's no corresponding
+// HCL type. This is used for formatted output, not for comparing types.
+func hclTypeName(i interface{}) string {
+ switch k := reflect.Indirect(reflect.ValueOf(i)).Kind(); k {
+ case reflect.Bool:
+ return "boolean"
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+ reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
+ return "number"
+ case reflect.Array, reflect.Slice:
+ return "list"
+ case reflect.Map:
+ return "map"
+ case reflect.String:
+ return "string"
+ default:
+ // fall back to the Go type if there's no match
+ return k.String()
+ }
+}
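+
+// For instance, hclTypeName(42) and hclTypeName(3.5) both return "number",
+// hclTypeName([]interface{}{}) returns "list", and an unhandled kind such
+// as a channel falls through to its Go kind name ("chan").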
diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
new file mode 100644
index 00000000..00392efe
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
@@ -0,0 +1,119 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config"
+)
+
+// ProviderEvalTree returns the evaluation tree for initializing and
+// configuring providers.
+func ProviderEvalTree(n string, config *config.RawConfig) EvalNode {
+ var provider ResourceProvider
+ var resourceConfig *ResourceConfig
+
+ seq := make([]EvalNode, 0, 5)
+ seq = append(seq, &EvalInitProvider{Name: n})
+
+ // Input stuff
+ seq = append(seq, &EvalOpFilter{
+ Ops: []walkOperation{walkInput, walkImport},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n,
+ Output: &provider,
+ },
+ &EvalInterpolate{
+ Config: config,
+ Output: &resourceConfig,
+ },
+ &EvalBuildProviderConfig{
+ Provider: n,
+ Config: &resourceConfig,
+ Output: &resourceConfig,
+ },
+ &EvalInputProvider{
+ Name: n,
+ Provider: &provider,
+ Config: &resourceConfig,
+ },
+ },
+ },
+ })
+
+ seq = append(seq, &EvalOpFilter{
+ Ops: []walkOperation{walkValidate},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n,
+ Output: &provider,
+ },
+ &EvalInterpolate{
+ Config: config,
+ Output: &resourceConfig,
+ },
+ &EvalBuildProviderConfig{
+ Provider: n,
+ Config: &resourceConfig,
+ Output: &resourceConfig,
+ },
+ &EvalValidateProvider{
+ Provider: &provider,
+ Config: &resourceConfig,
+ },
+ &EvalSetProviderConfig{
+ Provider: n,
+ Config: &resourceConfig,
+ },
+ },
+ },
+ })
+
+ // Apply stuff
+ seq = append(seq, &EvalOpFilter{
+ Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n,
+ Output: &provider,
+ },
+ &EvalInterpolate{
+ Config: config,
+ Output: &resourceConfig,
+ },
+ &EvalBuildProviderConfig{
+ Provider: n,
+ Config: &resourceConfig,
+ Output: &resourceConfig,
+ },
+ &EvalSetProviderConfig{
+ Provider: n,
+ Config: &resourceConfig,
+ },
+ },
+ },
+ })
+
+ // We configure on everything but validate, since validate may
+ // not have access to all the variables.
+ seq = append(seq, &EvalOpFilter{
+ Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalConfigProvider{
+ Provider: n,
+ Config: &resourceConfig,
+ },
+ },
+ },
+ })
+
+ return &EvalSequence{Nodes: seq}
+}
+
+// CloseProviderEvalTree returns the evaluation tree for closing
+// provider connections that aren't needed anymore.
+func CloseProviderEvalTree(n string) EvalNode {
+ return &EvalCloseProvider{Name: n}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go
new file mode 100644
index 00000000..48ce6a33
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph.go
@@ -0,0 +1,172 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "runtime/debug"
+ "strings"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// RootModuleName is the name given to the root module implicitly.
+const RootModuleName = "root"
+
+// RootModulePath is the path for the root module.
+var RootModulePath = []string{RootModuleName}
+
+// Graph represents the graph that Terraform uses to represent resources
+// and their dependencies.
+type Graph struct {
+ // Graph is the actual DAG. This is embedded so you can call the DAG
+ // methods directly.
+ dag.AcyclicGraph
+
+ // Path is the path in the module tree that this Graph represents.
+ // The root is represented by a single element list containing
+ // RootModuleName
+ Path []string
+
+ // debugName is a name for reference in the debug output. This is
+ // usually used to indicate what the topmost builder was, and whether
+ // this graph is a shadow or not.
+ debugName string
+}
+
+func (g *Graph) DirectedGraph() dag.Grapher {
+ return &g.AcyclicGraph
+}
+
+// Walk walks the graph with the given walker for callbacks. The graph
+// will be walked with full parallelism, so the walker should expect
+// to be called concurrently.
+func (g *Graph) Walk(walker GraphWalker) error {
+ return g.walk(walker)
+}
+
+func (g *Graph) walk(walker GraphWalker) error {
+ // The callbacks for enter/exiting a graph
+ ctx := walker.EnterPath(g.Path)
+ defer walker.ExitPath(g.Path)
+
+ // Get the path for logs
+ path := strings.Join(ctx.Path(), ".")
+
+ // Determine if our walker is a panic wrapper
+ panicwrap, ok := walker.(GraphWalkerPanicwrapper)
+ if !ok {
+ panicwrap = nil // just to be sure
+ }
+
+ debugName := "walk-graph.json"
+ if g.debugName != "" {
+ debugName = g.debugName + "-" + debugName
+ }
+
+ debugBuf := dbug.NewFileWriter(debugName)
+ g.SetDebugWriter(debugBuf)
+ defer debugBuf.Close()
+
+ // Walk the graph.
+ var walkFn dag.WalkFunc
+ walkFn = func(v dag.Vertex) (rerr error) {
+ log.Printf("[DEBUG] vertex '%s.%s': walking", path, dag.VertexName(v))
+ g.DebugVisitInfo(v, g.debugName)
+
+ // If we have a panic wrap GraphWalker and a panic occurs, recover
+ // and call that. We ensure the return value is an error, however,
+ // so that future nodes are not called.
+ defer func() {
+ // If no panicwrap, do nothing
+ if panicwrap == nil {
+ return
+ }
+
+ // If no panic, do nothing
+ err := recover()
+ if err == nil {
+ return
+ }
+
+ // Modify the return value to show the error
+ rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s",
+ dag.VertexName(v), err, debug.Stack())
+
+ // Call the panic wrapper
+ panicwrap.Panic(v, err)
+ }()
+
+ walker.EnterVertex(v)
+ defer walker.ExitVertex(v, rerr)
+
+ // vertexCtx is the context that we use when evaluating. This
+ // is normally the context of our graph but can be overridden
+ // with a GraphNodeSubPath impl.
+ vertexCtx := ctx
+ if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 {
+ vertexCtx = walker.EnterPath(normalizeModulePath(pn.Path()))
+ defer walker.ExitPath(pn.Path())
+ }
+
+ // If the node is eval-able, then evaluate it.
+ if ev, ok := v.(GraphNodeEvalable); ok {
+ tree := ev.EvalTree()
+ if tree == nil {
+ panic(fmt.Sprintf(
+ "%s.%s (%T): nil eval tree", path, dag.VertexName(v), v))
+ }
+
+ // Allow the walker to change our tree if needed. Eval,
+ // then callback with the output.
+ log.Printf("[DEBUG] vertex '%s.%s': evaluating", path, dag.VertexName(v))
+
+ g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path))
+
+ tree = walker.EnterEvalTree(v, tree)
+ output, err := Eval(tree, vertexCtx)
+ if rerr = walker.ExitEvalTree(v, output, err); rerr != nil {
+ return
+ }
+ }
+
+ // If the node is dynamically expanded, then expand it
+ if ev, ok := v.(GraphNodeDynamicExpandable); ok {
+ log.Printf(
+ "[DEBUG] vertex '%s.%s': expanding/walking dynamic subgraph",
+ path,
+ dag.VertexName(v))
+
+ g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path))
+
+ g, err := ev.DynamicExpand(vertexCtx)
+ if err != nil {
+ rerr = err
+ return
+ }
+ if g != nil {
+ // Walk the subgraph
+ if rerr = g.walk(walker); rerr != nil {
+ return
+ }
+ }
+ }
+
+ // If the node has a subgraph, then walk the subgraph
+ if sn, ok := v.(GraphNodeSubgraph); ok {
+ log.Printf(
+ "[DEBUG] vertex '%s.%s': walking subgraph",
+ path,
+ dag.VertexName(v))
+
+ g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path))
+
+ if rerr = sn.Subgraph().(*Graph).walk(walker); rerr != nil {
+ return
+ }
+ }
+
+ return nil
+ }
+
+ return g.AcyclicGraph.Walk(walkFn)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
new file mode 100644
index 00000000..6374bb90
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
@@ -0,0 +1,77 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// GraphBuilder is an interface that can be implemented and used with
+// Terraform to build the graph that Terraform walks.
+type GraphBuilder interface {
+ // Build builds the graph for the given module path. It is up to
+ // the interface implementation whether this build should expand
+ // the graph or not.
+ Build(path []string) (*Graph, error)
+}
+
+// BasicGraphBuilder is a GraphBuilder that builds a graph out of a
+// series of transforms and (optionally) validates the graph is a valid
+// structure.
+type BasicGraphBuilder struct {
+ Steps []GraphTransformer
+ Validate bool
+ // Optional name to add to the graph debug log
+ Name string
+}
+
+func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) {
+ g := &Graph{Path: path}
+
+ debugName := "graph.json"
+ if b.Name != "" {
+ debugName = b.Name + "-" + debugName
+ }
+ debugBuf := dbug.NewFileWriter(debugName)
+ g.SetDebugWriter(debugBuf)
+ defer debugBuf.Close()
+
+ for _, step := range b.Steps {
+ if step == nil {
+ continue
+ }
+
+ stepName := fmt.Sprintf("%T", step)
+ dot := strings.LastIndex(stepName, ".")
+ if dot >= 0 {
+ stepName = stepName[dot+1:]
+ }
+
+ debugOp := g.DebugOperation(stepName, "")
+ err := step.Transform(g)
+
+ errMsg := ""
+ if err != nil {
+ errMsg = err.Error()
+ }
+ debugOp.End(errMsg)
+
+ log.Printf(
+ "[TRACE] Graph after step %T:\n\n%s",
+ step, g.StringWithNodeTypes())
+
+ if err != nil {
+ return g, err
+ }
+ }
+
+ // Validate the graph structure
+ if b.Validate {
+ if err := g.Validate(); err != nil {
+ log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String())
+ return nil, err
+ }
+ }
+
+ return g, nil
+}
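+
+// A minimal sketch of driving the builder directly (the transformer list is
+// illustrative; real callers use the specialized builders defined alongside
+// this one):
+//
+//	b := &BasicGraphBuilder{
+//		Steps: []GraphTransformer{
+//			&ConfigTransformer{Module: mod},
+//			&RootTransformer{},
+//		},
+//		Validate: true,
+//		Name:     "example",
+//	}
+//	g, err := b.Build(RootModulePath)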
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
new file mode 100644
index 00000000..38a90f27
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
@@ -0,0 +1,141 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ApplyGraphBuilder implements GraphBuilder and is responsible for building
+// a graph for applying a Terraform diff.
+//
+// Because the graph is built from the diff (vs. the config or state),
+// this helps ensure that the apply-time graph doesn't modify any resources
+// that aren't explicitly in the diff. There are other scenarios where the
+// apply can deviate from the diff, so this is just one layer of protection.
+type ApplyGraphBuilder struct {
+ // Module is the root module for the graph to build.
+ Module *module.Tree
+
+ // Diff is the diff to apply.
+ Diff *Diff
+
+ // State is the current state
+ State *State
+
+ // Providers is the list of providers supported.
+ Providers []string
+
+ // Provisioners is the list of provisioners supported.
+ Provisioners []string
+
+ // Targets are resources to target. This is only required to make sure
+ // unnecessary outputs aren't included in the apply graph. The plan
+ // builder successfully handles targeting resources. In the future,
+ // outputs should go into the diff so that this is unnecessary.
+ Targets []string
+
+ // DisableReduce, if true, will not reduce the graph. Great for testing.
+ DisableReduce bool
+
+ // Destroy, if true, represents a pure destroy operation
+ Destroy bool
+
+ // Validate will do structural validation of the graph.
+ Validate bool
+}
+
+// See GraphBuilder
+func (b *ApplyGraphBuilder) Build(path []string) (*Graph, error) {
+ return (&BasicGraphBuilder{
+ Steps: b.Steps(),
+ Validate: b.Validate,
+ Name: "ApplyGraphBuilder",
+ }).Build(path)
+}
+
+// See GraphBuilder
+func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
+ // Custom factory for creating providers.
+ concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
+ return &NodeApplyableProvider{
+ NodeAbstractProvider: a,
+ }
+ }
+
+ concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+ return &NodeApplyableResource{
+ NodeAbstractResource: a,
+ }
+ }
+
+ steps := []GraphTransformer{
+ // Creates all the nodes represented in the diff.
+ &DiffTransformer{
+ Concrete: concreteResource,
+
+ Diff: b.Diff,
+ Module: b.Module,
+ State: b.State,
+ },
+
+ // Create orphan output nodes
+ &OrphanOutputTransformer{Module: b.Module, State: b.State},
+
+ // Attach the configuration to any resources
+ &AttachResourceConfigTransformer{Module: b.Module},
+
+ // Attach the state
+ &AttachStateTransformer{State: b.State},
+
+ // Create all the providers
+ &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
+ &ProviderTransformer{},
+ &DisableProviderTransformer{},
+ &ParentProviderTransformer{},
+ &AttachProviderConfigTransformer{Module: b.Module},
+
+ // Destruction ordering
+ &DestroyEdgeTransformer{Module: b.Module, State: b.State},
+ GraphTransformIf(
+ func() bool { return !b.Destroy },
+ &CBDEdgeTransformer{Module: b.Module, State: b.State},
+ ),
+
+ // Provisioner-related transformations
+ &MissingProvisionerTransformer{Provisioners: b.Provisioners},
+ &ProvisionerTransformer{},
+
+ // Add root variables
+ &RootVariableTransformer{Module: b.Module},
+
+ // Add the outputs
+ &OutputTransformer{Module: b.Module},
+
+ // Add module variables
+ &ModuleVariableTransformer{Module: b.Module},
+
+ // Connect references so ordering is correct
+ &ReferenceTransformer{},
+
+ // Add the node to fix the state count boundaries
+ &CountBoundaryTransformer{},
+
+ // Target
+ &TargetsTransformer{Targets: b.Targets},
+
+ // Close opened plugin connections
+ &CloseProviderTransformer{},
+ &CloseProvisionerTransformer{},
+
+ // Single root
+ &RootTransformer{},
+ }
+
+ if !b.DisableReduce {
+ // Perform the transitive reduction to make our graph a bit
+ // more sane if possible (it usually is possible).
+ steps = append(steps, &TransitiveReductionTransformer{})
+ }
+
+ return steps
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
new file mode 100644
index 00000000..014b348e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
@@ -0,0 +1,67 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for
+// planning a pure-destroy.
+//
+// Planning a pure destroy operation is simple because we can ignore most
+// ordering configuration and simply reverse the state.
+type DestroyPlanGraphBuilder struct {
+ // Module is the root module for the graph to build.
+ Module *module.Tree
+
+ // State is the current state
+ State *State
+
+ // Targets are resources to target
+ Targets []string
+
+ // Validate will do structural validation of the graph.
+ Validate bool
+}
+
+// See GraphBuilder
+func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) {
+ return (&BasicGraphBuilder{
+ Steps: b.Steps(),
+ Validate: b.Validate,
+ Name: "DestroyPlanGraphBuilder",
+ }).Build(path)
+}
+
+// See GraphBuilder
+func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
+ concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+ return &NodePlanDestroyableResource{
+ NodeAbstractResource: a,
+ }
+ }
+
+ steps := []GraphTransformer{
+ // Creates all the nodes represented in the state.
+ &StateTransformer{
+ Concrete: concreteResource,
+ State: b.State,
+ },
+
+ // Attach the configuration to any resources
+ &AttachResourceConfigTransformer{Module: b.Module},
+
+ // Destruction ordering. We require this only so that
+ // targeting below will prune the correct things.
+ &DestroyEdgeTransformer{Module: b.Module, State: b.State},
+
+ // Target. Note we don't set "Destroy: true" here since we already
+ // created proper destroy ordering.
+ &TargetsTransformer{Targets: b.Targets},
+
+ // Single root
+ &RootTransformer{},
+ }
+
+ return steps
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
new file mode 100644
index 00000000..7070c59e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
@@ -0,0 +1,76 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ImportGraphBuilder implements GraphBuilder and is responsible for building
+// a graph for importing resources into Terraform. This is a much, much
+// simpler graph than a normal configuration graph.
+type ImportGraphBuilder struct {
+ // ImportTargets are the list of resources to import.
+ ImportTargets []*ImportTarget
+
+ // Module is the module to add to the graph. See ImportOpts.Module.
+ Module *module.Tree
+
+ // Providers is the list of providers supported.
+ Providers []string
+}
+
+// Build builds the graph according to the steps returned by Steps.
+func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) {
+ return (&BasicGraphBuilder{
+ Steps: b.Steps(),
+ Validate: true,
+ Name: "ImportGraphBuilder",
+ }).Build(path)
+}
+
+// Steps returns the ordered list of GraphTransformers that must be executed
+// to build a complete graph.
+func (b *ImportGraphBuilder) Steps() []GraphTransformer {
+ // Get the module. If we don't have one, we just use an empty tree
+ // so that the transform still works but does nothing.
+ mod := b.Module
+ if mod == nil {
+ mod = module.NewEmptyTree()
+ }
+
+ // Custom factory for creating providers.
+ concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
+ return &NodeApplyableProvider{
+ NodeAbstractProvider: a,
+ }
+ }
+
+ steps := []GraphTransformer{
+ // Create all our resources from the configuration and state
+ &ConfigTransformer{Module: mod},
+
+ // Add the import steps
+ &ImportStateTransformer{Targets: b.ImportTargets},
+
+ // Provider-related transformations
+ &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
+ &ProviderTransformer{},
+ &DisableProviderTransformer{},
+ &ParentProviderTransformer{},
+ &AttachProviderConfigTransformer{Module: mod},
+
+ // This validates that the providers only depend on variables
+ &ImportProviderValidateTransformer{},
+
+ // Close opened plugin connections
+ &CloseProviderTransformer{},
+
+ // Single root
+ &RootTransformer{},
+
+ // Optimize
+ &TransitiveReductionTransformer{},
+ }
+
+ return steps
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
new file mode 100644
index 00000000..0df48cdb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
@@ -0,0 +1,27 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// InputGraphBuilder creates the graph for the input operation.
+//
+// Unlike other graph builders, this is a function since it currently modifies
+// and is based on the PlanGraphBuilder. The PlanGraphBuilder passed in will be
+// modified and should not be used for any other operations.
+func InputGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
+ // We're going to customize the concrete functions
+ p.CustomConcrete = true
+
+ // Set the provider to the normal provider. This will ask for input.
+ p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
+ return &NodeApplyableProvider{
+ NodeAbstractProvider: a,
+ }
+ }
+
+ // We purposely don't set any more concrete fields since the remainder
+ // should be no-ops.
+
+ return p
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
new file mode 100644
index 00000000..02d86970
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
@@ -0,0 +1,161 @@
+package terraform
+
+import (
+ "sync"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// PlanGraphBuilder implements GraphBuilder and is responsible for building
+// a graph for planning (creating a Terraform Diff).
+//
+// The primary difference between this graph and others:
+//
+// * Based on the config since it represents the target state
+//
+// * Ignores lifecycle options since no lifecycle events occur here. This
+// simplifies the graph significantly since complex transforms such as
+// create-before-destroy can be completely ignored.
+//
+type PlanGraphBuilder struct {
+ // Module is the root module for the graph to build.
+ Module *module.Tree
+
+ // State is the current state
+ State *State
+
+ // Providers is the list of providers supported.
+ Providers []string
+
+ // Provisioners is the list of provisioners supported.
+ Provisioners []string
+
+ // Targets are resources to target
+ Targets []string
+
+ // DisableReduce, if true, will not reduce the graph. Great for testing.
+ DisableReduce bool
+
+ // Validate will do structural validation of the graph.
+ Validate bool
+
+ // CustomConcrete can be set to customize the node types created
+ // for various parts of the plan. This is useful in order to customize
+ // the plan behavior.
+ CustomConcrete bool
+ ConcreteProvider ConcreteProviderNodeFunc
+ ConcreteResource ConcreteResourceNodeFunc
+ ConcreteResourceOrphan ConcreteResourceNodeFunc
+
+ once sync.Once
+}
+
+// See GraphBuilder
+func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) {
+ return (&BasicGraphBuilder{
+ Steps: b.Steps(),
+ Validate: b.Validate,
+ Name: "PlanGraphBuilder",
+ }).Build(path)
+}
+
+// See GraphBuilder
+func (b *PlanGraphBuilder) Steps() []GraphTransformer {
+ b.once.Do(b.init)
+
+ steps := []GraphTransformer{
+ // Creates all the resources represented in the config
+ &ConfigTransformer{
+ Concrete: b.ConcreteResource,
+ Module: b.Module,
+ },
+
+ // Add the outputs
+ &OutputTransformer{Module: b.Module},
+
+ // Add orphan resources
+ &OrphanResourceTransformer{
+ Concrete: b.ConcreteResourceOrphan,
+ State: b.State,
+ Module: b.Module,
+ },
+
+ // Attach the configuration to any resources
+ &AttachResourceConfigTransformer{Module: b.Module},
+
+ // Attach the state
+ &AttachStateTransformer{State: b.State},
+
+ // Add root variables
+ &RootVariableTransformer{Module: b.Module},
+
+ // Create all the providers
+ &MissingProviderTransformer{Providers: b.Providers, Concrete: b.ConcreteProvider},
+ &ProviderTransformer{},
+ &DisableProviderTransformer{},
+ &ParentProviderTransformer{},
+ &AttachProviderConfigTransformer{Module: b.Module},
+
+ // Provisioner-related transformations. Only add these if requested.
+ GraphTransformIf(
+ func() bool { return b.Provisioners != nil },
+ GraphTransformMulti(
+ &MissingProvisionerTransformer{Provisioners: b.Provisioners},
+ &ProvisionerTransformer{},
+ ),
+ ),
+
+ // Add module variables
+ &ModuleVariableTransformer{Module: b.Module},
+
+ // Connect so that the references are ready for targeting. We'll
+ // have to connect again later for providers and so on.
+ &ReferenceTransformer{},
+
+ // Target
+ &TargetsTransformer{Targets: b.Targets},
+
+ // Close opened plugin connections
+ &CloseProviderTransformer{},
+ &CloseProvisionerTransformer{},
+
+ // Single root
+ &RootTransformer{},
+ }
+
+ if !b.DisableReduce {
+ // Perform the transitive reduction to make our graph a bit
+ // more sane if possible (it usually is possible).
+ steps = append(steps, &TransitiveReductionTransformer{})
+ }
+
+ return steps
+}
+
+func (b *PlanGraphBuilder) init() {
+ // Do nothing if the user requests customizing the fields
+ if b.CustomConcrete {
+ return
+ }
+
+ b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
+ return &NodeApplyableProvider{
+ NodeAbstractProvider: a,
+ }
+ }
+
+ b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
+ return &NodePlannableResource{
+ NodeAbstractCountResource: &NodeAbstractCountResource{
+ NodeAbstractResource: a,
+ },
+ }
+ }
+
+ b.ConcreteResourceOrphan = func(a *NodeAbstractResource) dag.Vertex {
+ return &NodePlannableResourceOrphan{
+ NodeAbstractResource: a,
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
new file mode 100644
index 00000000..88ae3380
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
@@ -0,0 +1,132 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// RefreshGraphBuilder implements GraphBuilder and is responsible for building
+// a graph for refreshing (updating the Terraform state).
+//
+// The primary difference between this graph and others:
+//
+// * Based on the state since it represents the only resources that
+// need to be refreshed.
+//
+// * Ignores lifecycle options since no lifecycle events occur here. This
+// simplifies the graph significantly since complex transforms such as
+// create-before-destroy can be completely ignored.
+//
+type RefreshGraphBuilder struct {
+ // Module is the root module for the graph to build.
+ Module *module.Tree
+
+ // State is the current state
+ State *State
+
+ // Providers is the list of providers supported.
+ Providers []string
+
+ // Targets are resources to target
+ Targets []string
+
+ // DisableReduce, if true, will not reduce the graph. Great for testing.
+ DisableReduce bool
+
+ // Validate will do structural validation of the graph.
+ Validate bool
+}
+
+// See GraphBuilder
+func (b *RefreshGraphBuilder) Build(path []string) (*Graph, error) {
+ return (&BasicGraphBuilder{
+ Steps: b.Steps(),
+ Validate: b.Validate,
+ Name: "RefreshGraphBuilder",
+ }).Build(path)
+}
+
+// See GraphBuilder
+func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
+ // Custom factory for creating providers.
+ concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
+ return &NodeApplyableProvider{
+ NodeAbstractProvider: a,
+ }
+ }
+
+ concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+ return &NodeRefreshableResource{
+ NodeAbstractResource: a,
+ }
+ }
+
+ concreteDataResource := func(a *NodeAbstractResource) dag.Vertex {
+ return &NodeRefreshableDataResource{
+ NodeAbstractCountResource: &NodeAbstractCountResource{
+ NodeAbstractResource: a,
+ },
+ }
+ }
+
+ steps := []GraphTransformer{
+ // Creates all the resources represented in the state
+ &StateTransformer{
+ Concrete: concreteResource,
+ State: b.State,
+ },
+
+ // Creates all the data resources that aren't in the state
+ &ConfigTransformer{
+ Concrete: concreteDataResource,
+ Module: b.Module,
+ Unique: true,
+ ModeFilter: true,
+ Mode: config.DataResourceMode,
+ },
+
+ // Attach the state
+ &AttachStateTransformer{State: b.State},
+
+ // Attach the configuration to any resources
+ &AttachResourceConfigTransformer{Module: b.Module},
+
+ // Add root variables
+ &RootVariableTransformer{Module: b.Module},
+
+ // Create all the providers
+ &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
+ &ProviderTransformer{},
+ &DisableProviderTransformer{},
+ &ParentProviderTransformer{},
+ &AttachProviderConfigTransformer{Module: b.Module},
+
+ // Add the outputs
+ &OutputTransformer{Module: b.Module},
+
+ // Add module variables
+ &ModuleVariableTransformer{Module: b.Module},
+
+ // Connect so that the references are ready for targeting. We'll
+ // have to connect again later for providers and so on.
+ &ReferenceTransformer{},
+
+ // Target
+ &TargetsTransformer{Targets: b.Targets},
+
+ // Close opened plugin connections
+ &CloseProviderTransformer{},
+
+ // Single root
+ &RootTransformer{},
+ }
+
+ if !b.DisableReduce {
+ // Perform the transitive reduction to make our graph a bit
+ // more sane if possible (it usually is possible).
+ steps = append(steps, &TransitiveReductionTransformer{})
+ }
+
+ return steps
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
new file mode 100644
index 00000000..645ec7be
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
@@ -0,0 +1,36 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ValidateGraphBuilder creates the graph for the validate operation.
+//
+// ValidateGraphBuilder is based on the PlanGraphBuilder. We do this so that
+// we only have to validate what we'd normally plan anyway. The
+// PlanGraphBuilder given will be modified so it shouldn't be used for anything
+// else after calling this function.
+func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
+ // We're going to customize the concrete functions
+ p.CustomConcrete = true
+
+ // Set the provider to the normal provider. This will ask for input.
+ p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
+ return &NodeApplyableProvider{
+ NodeAbstractProvider: a,
+ }
+ }
+
+ p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
+ return &NodeValidatableResource{
+ NodeAbstractCountResource: &NodeAbstractCountResource{
+ NodeAbstractResource: a,
+ },
+ }
+ }
+
+ // We purposely don't set any other concrete types since they don't
+ // require validation.
+
+ return p
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
new file mode 100644
index 00000000..73e3821f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
@@ -0,0 +1,9 @@
+package terraform
+
+import "github.com/hashicorp/terraform/dag"
+
+// GraphDot returns the dot formatting of a visual representation of
+// the given Terraform graph.
+func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) {
+ return string(g.Dot(opts)), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
new file mode 100644
index 00000000..2897eb54
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
@@ -0,0 +1,7 @@
+package terraform
+
+// GraphNodeSubPath says that a node is part of a graph with a
+// different path, and the context should be adjusted accordingly.
+type GraphNodeSubPath interface {
+ Path() []string
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
new file mode 100644
index 00000000..34ce6f64
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
@@ -0,0 +1,60 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphWalker is an interface that can be implemented so that, when used
+// with Graph.Walk, the given callbacks are invoked on certain graph events.
+type GraphWalker interface {
+ EnterPath([]string) EvalContext
+ ExitPath([]string)
+ EnterVertex(dag.Vertex)
+ ExitVertex(dag.Vertex, error)
+ EnterEvalTree(dag.Vertex, EvalNode) EvalNode
+ ExitEvalTree(dag.Vertex, interface{}, error) error
+}
+
+// GraphWalkerPanicwrapper can be optionally implemented to catch panics
+// that occur while walking the graph. This is not generally recommended
+// since panics should crash Terraform and result in a bug report. However,
+// this is particularly useful for situations like the shadow graph where
+// you don't ever want to cause a panic.
+type GraphWalkerPanicwrapper interface {
+ GraphWalker
+
+ // Panic is called when a panic occurs. This halts the panic from
+ // propagating, so if the walker still wants a crash it should panic
+ // again. This is called from within a defer so runtime/debug.Stack can
+ // be used to get the stack trace of the panic.
+ Panic(dag.Vertex, interface{})
+}
+
+// GraphWalkerPanicwrap wraps an existing GraphWalker to catch and swallow
+// panics. This doesn't lose the panics since they are still
+// returned as errors as part of a graph walk.
+func GraphWalkerPanicwrap(w GraphWalker) GraphWalkerPanicwrapper {
+ return &graphWalkerPanicwrapper{
+ GraphWalker: w,
+ }
+}
+
+type graphWalkerPanicwrapper struct {
+ GraphWalker
+}
+
+func (graphWalkerPanicwrapper) Panic(dag.Vertex, interface{}) {}
+
+// NullGraphWalker is a GraphWalker implementation that does nothing.
+// This can be embedded within other GraphWalker implementations for easily
+// implementing all the required functions.
+type NullGraphWalker struct{}
+
+func (NullGraphWalker) EnterPath([]string) EvalContext { return new(MockEvalContext) }
+func (NullGraphWalker) ExitPath([]string) {}
+func (NullGraphWalker) EnterVertex(dag.Vertex) {}
+func (NullGraphWalker) ExitVertex(dag.Vertex, error) {}
+func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n }
+func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) error {
+ return nil
+}
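+
+// A sketch of embedding NullGraphWalker so that only the callbacks of
+// interest need to be implemented (loggingWalker is hypothetical):
+//
+//	type loggingWalker struct {
+//		NullGraphWalker
+//	}
+//
+//	func (loggingWalker) EnterVertex(v dag.Vertex) {
+//		log.Printf("[TRACE] entering %s", dag.VertexName(v))
+//	}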
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
new file mode 100644
index 00000000..e63b4603
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
@@ -0,0 +1,157 @@
+package terraform
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "sync"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ContextGraphWalker is the GraphWalker implementation used with the
+// Context struct to walk and evaluate the graph.
+type ContextGraphWalker struct {
+ NullGraphWalker
+
+ // Configurable values
+ Context *Context
+ Operation walkOperation
+ StopContext context.Context
+
+ // Outputs: do not set these, and do not read them while the
+ // graph is being walked.
+ ValidationWarnings []string
+ ValidationErrors []error
+
+ errorLock sync.Mutex
+ once sync.Once
+ contexts map[string]*BuiltinEvalContext
+ contextLock sync.Mutex
+ interpolaterVars map[string]map[string]interface{}
+ interpolaterVarLock sync.Mutex
+ providerCache map[string]ResourceProvider
+ providerConfigCache map[string]*ResourceConfig
+ providerLock sync.Mutex
+ provisionerCache map[string]ResourceProvisioner
+ provisionerLock sync.Mutex
+}
+
+func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
+ w.once.Do(w.init)
+
+ w.contextLock.Lock()
+ defer w.contextLock.Unlock()
+
+ // If we already have a context for this path cached, use that
+ key := PathCacheKey(path)
+ if ctx, ok := w.contexts[key]; ok {
+ return ctx
+ }
+
+ // Set up the variables for this interpolater
+ variables := make(map[string]interface{})
+ if len(path) <= 1 {
+ for k, v := range w.Context.variables {
+ variables[k] = v
+ }
+ }
+ w.interpolaterVarLock.Lock()
+ if m, ok := w.interpolaterVars[key]; ok {
+ for k, v := range m {
+ variables[k] = v
+ }
+ }
+ w.interpolaterVars[key] = variables
+ w.interpolaterVarLock.Unlock()
+
+ ctx := &BuiltinEvalContext{
+ StopContext: w.StopContext,
+ PathValue: path,
+ Hooks: w.Context.hooks,
+ InputValue: w.Context.uiInput,
+ Components: w.Context.components,
+ ProviderCache: w.providerCache,
+ ProviderConfigCache: w.providerConfigCache,
+ ProviderInputConfig: w.Context.providerInputConfig,
+ ProviderLock: &w.providerLock,
+ ProvisionerCache: w.provisionerCache,
+ ProvisionerLock: &w.provisionerLock,
+ DiffValue: w.Context.diff,
+ DiffLock: &w.Context.diffLock,
+ StateValue: w.Context.state,
+ StateLock: &w.Context.stateLock,
+ Interpolater: &Interpolater{
+ Operation: w.Operation,
+ Meta: w.Context.meta,
+ Module: w.Context.module,
+ State: w.Context.state,
+ StateLock: &w.Context.stateLock,
+ VariableValues: variables,
+ VariableValuesLock: &w.interpolaterVarLock,
+ },
+ InterpolaterVars: w.interpolaterVars,
+ InterpolaterVarLock: &w.interpolaterVarLock,
+ }
+
+ w.contexts[key] = ctx
+ return ctx
+}
+
+func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {
+ log.Printf("[TRACE] [%s] Entering eval tree: %s",
+ w.Operation, dag.VertexName(v))
+
+ // Acquire a lock on the semaphore
+ w.Context.parallelSem.Acquire()
+
+ // We want to filter the evaluation tree to only include operations
+ // that belong in this operation.
+ return EvalFilter(n, EvalNodeFilterOp(w.Operation))
+}
+
+func (w *ContextGraphWalker) ExitEvalTree(
+ v dag.Vertex, output interface{}, err error) error {
+ log.Printf("[TRACE] [%s] Exiting eval tree: %s",
+ w.Operation, dag.VertexName(v))
+
+ // Release the semaphore
+ w.Context.parallelSem.Release()
+
+ if err == nil {
+ return nil
+ }
+
+ // Acquire the error lock, since everything below mutates shared state.
+ w.errorLock.Lock()
+ defer w.errorLock.Unlock()
+
+ // Try to get a validation error out of it. If it's not a validation
+ // error, then just record the normal error.
+ verr, ok := err.(*EvalValidateError)
+ if !ok {
+ return err
+ }
+
+ for _, msg := range verr.Warnings {
+ w.ValidationWarnings = append(
+ w.ValidationWarnings,
+ fmt.Sprintf("%s: %s", dag.VertexName(v), msg))
+ }
+ for _, e := range verr.Errors {
+ w.ValidationErrors = append(
+ w.ValidationErrors,
+ errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", dag.VertexName(v)), e))
+ }
+
+ return nil
+}
+
+func (w *ContextGraphWalker) init() {
+ w.contexts = make(map[string]*BuiltinEvalContext, 5)
+ w.providerCache = make(map[string]ResourceProvider, 5)
+ w.providerConfigCache = make(map[string]*ResourceConfig, 5)
+ w.provisionerCache = make(map[string]ResourceProvisioner, 5)
+ w.interpolaterVars = make(map[string]map[string]interface{}, 5)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
new file mode 100644
index 00000000..3fb37481
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
@@ -0,0 +1,18 @@
+package terraform
+
+//go:generate stringer -type=walkOperation graph_walk_operation.go
+
+// walkOperation is an enum which tells the walkContext what to do.
+type walkOperation byte
+
+const (
+ walkInvalid walkOperation = iota
+ walkInput
+ walkApply
+ walkPlan
+ walkPlanDestroy
+ walkRefresh
+ walkValidate
+ walkDestroy
+ walkImport
+)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
new file mode 100644
index 00000000..e97b4855
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=GraphType context_graph_type.go"; DO NOT EDIT.
+
+package terraform
+
+import "fmt"
+
+const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate"
+
+var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125}
+
+func (i GraphType) String() string {
+ if i >= GraphType(len(_GraphType_index)-1) {
+ return fmt.Sprintf("GraphType(%d)", i)
+ }
+ return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]]
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go
new file mode 100644
index 00000000..ab11e8ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook.go
@@ -0,0 +1,137 @@
+package terraform
+
+// HookAction is an enum of actions that can be taken as a result of a hook
+// callback. This allows you to modify the behavior of Terraform at runtime.
+type HookAction byte
+
+const (
+ // HookActionContinue continues with processing as usual.
+ HookActionContinue HookAction = iota
+
+ // HookActionHalt halts immediately: no more hooks are processed
+ // and the action that Terraform was about to take is cancelled.
+ HookActionHalt
+)
+
+// Hook is the interface that must be implemented to hook into various
+// parts of Terraform, allowing you to inspect or change behavior at runtime.
+//
+// There are MANY hook points into Terraform. If you only want to implement
+// some hook points, but not all (which is the likely case), then embed the
+// NilHook into your struct, which implements all of the interface but does
+// nothing. Then, override only the functions you want to implement.
+type Hook interface {
+ // PreApply and PostApply are called before and after a single
+ // resource is applied. The error argument in PostApply is the
+ // error, if any, that was returned from the provider Apply call itself.
+ PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error)
+ PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error)
+
+ // PreDiff and PostDiff are called before and after a single
+ // resource is diffed.
+ PreDiff(*InstanceInfo, *InstanceState) (HookAction, error)
+ PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error)
+
+ // Provisioning hooks
+ //
+ // All should be self-explanatory. ProvisionOutput is called with
+ // output sent back by the provisioners. This will be called multiple
+ // times as output comes in, but each call should represent a line of
+ // output. The ProvisionOutput method cannot control whether the
+ // hook continues running.
+ PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
+ PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
+ PreProvision(*InstanceInfo, string) (HookAction, error)
+ PostProvision(*InstanceInfo, string, error) (HookAction, error)
+ ProvisionOutput(*InstanceInfo, string, string)
+
+ // PreRefresh and PostRefresh are called before and after a single
+ // resource state is refreshed, respectively.
+ PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
+ PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
+
+ // PostStateUpdate is called after the state is updated.
+ PostStateUpdate(*State) (HookAction, error)
+
+ // PreImportState and PostImportState are called before and after
+ // a single resource's state is imported.
+ PreImportState(*InstanceInfo, string) (HookAction, error)
+ PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error)
+}
+
+// NilHook is a Hook implementation that does nothing. It exists only to
+// simplify implementing hooks. You can embed this into your Hook implementation
+// and only implement the functions you are interested in.
+type NilHook struct{}
+
+func (*NilHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PreProvision(*InstanceInfo, string) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) ProvisionOutput(
+ *InstanceInfo, string, string) {
+}
+
+func (*NilHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PreImportState(*InstanceInfo, string) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+func (*NilHook) PostStateUpdate(*State) (HookAction, error) {
+ return HookActionContinue, nil
+}
+
+// handleHook turns hook actions into panics. This lets you use
+// Go's panic/recover machinery as flow control for hook
+// actions.
+func handleHook(a HookAction, err error) {
+ if err != nil {
+ // TODO: handle errors
+ }
+
+ switch a {
+ case HookActionContinue:
+ return
+ case HookActionHalt:
+ panic(HookActionHalt)
+ }
+}
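
handleHook converts HookActionHalt into a panic so that a deeply nested evaluation can unwind without threading a halt flag through every return value; a recover at the walk boundary then turns it back into an orderly stop. A reduced sketch of that flow-control pattern, with stand-in types for everything outside handleHook itself:

package main

import "fmt"

type HookAction byte

const (
	HookActionContinue HookAction = iota
	HookActionHalt
)

func handleHook(a HookAction) {
	if a == HookActionHalt {
		panic(HookActionHalt) // unwind the walk
	}
}

// walkVertex recovers the halt panic at the boundary of one unit of work.
func walkVertex(action HookAction) (halted bool) {
	defer func() {
		if r := recover(); r != nil {
			if r == HookActionHalt {
				halted = true
				return
			}
			panic(r) // not ours: re-panic
		}
	}()

	handleHook(action) // may panic
	fmt.Println("vertex work ran to completion")
	return false
}

func main() {
	fmt.Println("halted:", walkVertex(HookActionContinue)) // halted: false
	fmt.Println("halted:", walkVertex(HookActionHalt))     // halted: true
}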
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
new file mode 100644
index 00000000..0e464006
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
@@ -0,0 +1,245 @@
+package terraform
+
+import "sync"
+
+// MockHook is an implementation of Hook that can be used for tests.
+// It records all of its function calls.
+type MockHook struct {
+ sync.Mutex
+
+ PreApplyCalled bool
+ PreApplyInfo *InstanceInfo
+ PreApplyDiff *InstanceDiff
+ PreApplyState *InstanceState
+ PreApplyReturn HookAction
+ PreApplyError error
+
+ PostApplyCalled bool
+ PostApplyInfo *InstanceInfo
+ PostApplyState *InstanceState
+ PostApplyError error
+ PostApplyReturn HookAction
+ PostApplyReturnError error
+ PostApplyFn func(*InstanceInfo, *InstanceState, error) (HookAction, error)
+
+ PreDiffCalled bool
+ PreDiffInfo *InstanceInfo
+ PreDiffState *InstanceState
+ PreDiffReturn HookAction
+ PreDiffError error
+
+ PostDiffCalled bool
+ PostDiffInfo *InstanceInfo
+ PostDiffDiff *InstanceDiff
+ PostDiffReturn HookAction
+ PostDiffError error
+
+ PreProvisionResourceCalled bool
+ PreProvisionResourceInfo *InstanceInfo
+ PreProvisionInstanceState *InstanceState
+ PreProvisionResourceReturn HookAction
+ PreProvisionResourceError error
+
+ PostProvisionResourceCalled bool
+ PostProvisionResourceInfo *InstanceInfo
+ PostProvisionInstanceState *InstanceState
+ PostProvisionResourceReturn HookAction
+ PostProvisionResourceError error
+
+ PreProvisionCalled bool
+ PreProvisionInfo *InstanceInfo
+ PreProvisionProvisionerId string
+ PreProvisionReturn HookAction
+ PreProvisionError error
+
+ PostProvisionCalled bool
+ PostProvisionInfo *InstanceInfo
+ PostProvisionProvisionerId string
+ PostProvisionErrorArg error
+ PostProvisionReturn HookAction
+ PostProvisionError error
+
+ ProvisionOutputCalled bool
+ ProvisionOutputInfo *InstanceInfo
+ ProvisionOutputProvisionerId string
+ ProvisionOutputMessage string
+
+ PostRefreshCalled bool
+ PostRefreshInfo *InstanceInfo
+ PostRefreshState *InstanceState
+ PostRefreshReturn HookAction
+ PostRefreshError error
+
+ PreRefreshCalled bool
+ PreRefreshInfo *InstanceInfo
+ PreRefreshState *InstanceState
+ PreRefreshReturn HookAction
+ PreRefreshError error
+
+ PreImportStateCalled bool
+ PreImportStateInfo *InstanceInfo
+ PreImportStateId string
+ PreImportStateReturn HookAction
+ PreImportStateError error
+
+ PostImportStateCalled bool
+ PostImportStateInfo *InstanceInfo
+ PostImportStateState []*InstanceState
+ PostImportStateReturn HookAction
+ PostImportStateError error
+
+ PostStateUpdateCalled bool
+ PostStateUpdateState *State
+ PostStateUpdateReturn HookAction
+ PostStateUpdateError error
+}
+
+func (h *MockHook) PreApply(n *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreApplyCalled = true
+ h.PreApplyInfo = n
+ h.PreApplyDiff = d
+ h.PreApplyState = s
+ return h.PreApplyReturn, h.PreApplyError
+}
+
+func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostApplyCalled = true
+ h.PostApplyInfo = n
+ h.PostApplyState = s
+ h.PostApplyError = e
+
+ if h.PostApplyFn != nil {
+ return h.PostApplyFn(n, s, e)
+ }
+
+ return h.PostApplyReturn, h.PostApplyReturnError
+}
+
+func (h *MockHook) PreDiff(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreDiffCalled = true
+ h.PreDiffInfo = n
+ h.PreDiffState = s
+ return h.PreDiffReturn, h.PreDiffError
+}
+
+func (h *MockHook) PostDiff(n *InstanceInfo, d *InstanceDiff) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostDiffCalled = true
+ h.PostDiffInfo = n
+ h.PostDiffDiff = d
+ return h.PostDiffReturn, h.PostDiffError
+}
+
+func (h *MockHook) PreProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreProvisionResourceCalled = true
+ h.PreProvisionResourceInfo = n
+ h.PreProvisionInstanceState = s
+ return h.PreProvisionResourceReturn, h.PreProvisionResourceError
+}
+
+func (h *MockHook) PostProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostProvisionResourceCalled = true
+ h.PostProvisionResourceInfo = n
+ h.PostProvisionInstanceState = s
+ return h.PostProvisionResourceReturn, h.PostProvisionResourceError
+}
+
+func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreProvisionCalled = true
+ h.PreProvisionInfo = n
+ h.PreProvisionProvisionerId = provId
+ return h.PreProvisionReturn, h.PreProvisionError
+}
+
+func (h *MockHook) PostProvision(n *InstanceInfo, provId string, err error) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostProvisionCalled = true
+ h.PostProvisionInfo = n
+ h.PostProvisionProvisionerId = provId
+ h.PostProvisionErrorArg = err
+ return h.PostProvisionReturn, h.PostProvisionError
+}
+
+func (h *MockHook) ProvisionOutput(
+ n *InstanceInfo,
+ provId string,
+ msg string) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.ProvisionOutputCalled = true
+ h.ProvisionOutputInfo = n
+ h.ProvisionOutputProvisionerId = provId
+ h.ProvisionOutputMessage = msg
+}
+
+func (h *MockHook) PreRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreRefreshCalled = true
+ h.PreRefreshInfo = n
+ h.PreRefreshState = s
+ return h.PreRefreshReturn, h.PreRefreshError
+}
+
+func (h *MockHook) PostRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostRefreshCalled = true
+ h.PostRefreshInfo = n
+ h.PostRefreshState = s
+ return h.PostRefreshReturn, h.PostRefreshError
+}
+
+func (h *MockHook) PreImportState(info *InstanceInfo, id string) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PreImportStateCalled = true
+ h.PreImportStateInfo = info
+ h.PreImportStateId = id
+ return h.PreImportStateReturn, h.PreImportStateError
+}
+
+func (h *MockHook) PostImportState(info *InstanceInfo, s []*InstanceState) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostImportStateCalled = true
+ h.PostImportStateInfo = info
+ h.PostImportStateState = s
+ return h.PostImportStateReturn, h.PostImportStateError
+}
+
+func (h *MockHook) PostStateUpdate(s *State) (HookAction, error) {
+ h.Lock()
+ defer h.Unlock()
+
+ h.PostStateUpdateCalled = true
+ h.PostStateUpdateState = s
+ return h.PostStateUpdateReturn, h.PostStateUpdateError
+}
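
Because every recorded call is exposed as a public field, a test can hand a MockHook to code that expects a Hook and assert afterwards on exactly which callbacks fired and with what arguments. A hypothetical test sketch (the argument values are invented for illustration):

package terraform

import "testing"

func TestMockHookRecordsPreApply(t *testing.T) {
	h := new(MockHook)
	info := &InstanceInfo{Id: "aws_instance.foo"}
	state := &InstanceState{}

	// Code under test would normally invoke the hook; we call it directly.
	action, err := h.PreApply(info, state, nil)
	if err != nil {
		t.Fatal(err)
	}
	if action != h.PreApplyReturn {
		t.Fatalf("unexpected action: %v", action)
	}
	if !h.PreApplyCalled || h.PreApplyInfo != info || h.PreApplyState != state {
		t.Fatal("PreApply call was not recorded")
	}
}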
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
new file mode 100644
index 00000000..104d0098
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
@@ -0,0 +1,87 @@
+package terraform
+
+import (
+ "sync/atomic"
+)
+
+// stopHook is a private Hook implementation that Terraform uses to
+// signal when to stop or cancel actions.
+type stopHook struct {
+ stop uint32
+}
+
+func (h *stopHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PreProvision(*InstanceInfo, string) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) ProvisionOutput(*InstanceInfo, string, string) {
+}
+
+func (h *stopHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PreImportState(*InstanceInfo, string) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) PostStateUpdate(*State) (HookAction, error) {
+ return h.hook()
+}
+
+func (h *stopHook) hook() (HookAction, error) {
+ if h.Stopped() {
+ return HookActionHalt, nil
+ }
+
+ return HookActionContinue, nil
+}
+
+// Reset clears the stop flag; it uses atomics, so no external locking is needed.
+func (h *stopHook) Reset() {
+ atomic.StoreUint32(&h.stop, 0)
+}
+
+func (h *stopHook) Stop() {
+ atomic.StoreUint32(&h.stop, 1)
+}
+
+func (h *stopHook) Stopped() bool {
+ return atomic.LoadUint32(&h.stop) == 1
+}
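
The stop flag is a bare uint32 touched only through sync/atomic, so Stop can be flipped from a signal-handling goroutine while the walk polls Stopped, with no mutex involved. A small self-contained sketch of that interaction (the polling loop stands in for the real graph walk):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type stopHook struct{ stop uint32 }

func (h *stopHook) Stop()         { atomic.StoreUint32(&h.stop, 1) }
func (h *stopHook) Stopped() bool { return atomic.LoadUint32(&h.stop) == 1 }

func main() {
	h := new(stopHook)

	// Simulate a Ctrl-C handler flipping the flag mid-walk.
	go func() {
		time.Sleep(25 * time.Millisecond)
		h.Stop()
	}()

	for step := 0; ; step++ {
		if h.Stopped() {
			fmt.Printf("halted at step %d\n", step)
			return
		}
		time.Sleep(10 * time.Millisecond)
	}
}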
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go
new file mode 100644
index 00000000..08959717
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go
@@ -0,0 +1,13 @@
+package terraform
+
+//go:generate stringer -type=InstanceType instancetype.go
+
+// InstanceType is an enum of the various types of instances stored in the State.
+type InstanceType int
+
+const (
+ TypeInvalid InstanceType = iota
+ TypePrimary
+ TypeTainted
+ TypeDeposed
+)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
new file mode 100644
index 00000000..f69267cd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT.
+
+package terraform
+
+import "fmt"
+
+const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed"
+
+var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44}
+
+func (i InstanceType) String() string {
+ if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) {
+ return fmt.Sprintf("InstanceType(%d)", i)
+ }
+ return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]]
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
new file mode 100644
index 00000000..855548c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
@@ -0,0 +1,790 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/hil"
+ "github.com/hashicorp/hil/ast"
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/flatmap"
+)
+
+const (
+ // VarEnvPrefix is the prefix of variables that are read from
+ // the environment to set variables here.
+ VarEnvPrefix = "TF_VAR_"
+)
+
+// Interpolater is the structure responsible for determining the values
+// for interpolations such as `aws_instance.foo.bar`.
+type Interpolater struct {
+ Operation walkOperation
+ Meta *ContextMeta
+ Module *module.Tree
+ State *State
+ StateLock *sync.RWMutex
+ VariableValues map[string]interface{}
+ VariableValuesLock *sync.Mutex
+}
+
+// InterpolationScope is the current scope of execution. This is required
+// since some interpolated variables depend on what we're operating on
+// and where we are.
+type InterpolationScope struct {
+ Path []string
+ Resource *Resource
+}
+
+// Values returns the values for all the variables in the given map.
+func (i *Interpolater) Values(
+ scope *InterpolationScope,
+ vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) {
+ if scope == nil {
+ scope = &InterpolationScope{}
+ }
+
+ result := make(map[string]ast.Variable, len(vars))
+
+ // Copy the default variables
+ if i.Module != nil && scope != nil {
+ mod := i.Module
+ if len(scope.Path) > 1 {
+ mod = i.Module.Child(scope.Path[1:])
+ }
+ for _, v := range mod.Config().Variables {
+ // Set default variables
+ if v.Default == nil {
+ continue
+ }
+
+ n := fmt.Sprintf("var.%s", v.Name)
+ variable, err := hil.InterfaceToVariable(v.Default)
+ if err != nil {
+ return nil, fmt.Errorf("invalid default map value for %s: %v", v.Name, v.Default)
+ }
+
+ result[n] = variable
+ }
+ }
+
+ for n, rawV := range vars {
+ var err error
+ switch v := rawV.(type) {
+ case *config.CountVariable:
+ err = i.valueCountVar(scope, n, v, result)
+ case *config.ModuleVariable:
+ err = i.valueModuleVar(scope, n, v, result)
+ case *config.PathVariable:
+ err = i.valuePathVar(scope, n, v, result)
+ case *config.ResourceVariable:
+ err = i.valueResourceVar(scope, n, v, result)
+ case *config.SelfVariable:
+ err = i.valueSelfVar(scope, n, v, result)
+ case *config.SimpleVariable:
+ err = i.valueSimpleVar(scope, n, v, result)
+ case *config.TerraformVariable:
+ err = i.valueTerraformVar(scope, n, v, result)
+ case *config.UserVariable:
+ err = i.valueUserVar(scope, n, v, result)
+ default:
+ err = fmt.Errorf("%s: unknown variable type: %T", n, rawV)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return result, nil
+}
+
+func (i *Interpolater) valueCountVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.CountVariable,
+ result map[string]ast.Variable) error {
+ switch v.Type {
+ case config.CountValueIndex:
+ if scope.Resource == nil {
+ return fmt.Errorf("%s: count.index is only valid within resources", n)
+ }
+ result[n] = ast.Variable{
+ Value: scope.Resource.CountIndex,
+ Type: ast.TypeInt,
+ }
+ return nil
+ default:
+ return fmt.Errorf("%s: unknown count type: %#v", n, v.Type)
+ }
+}
+
+func unknownVariable() ast.Variable {
+ return ast.Variable{
+ Type: ast.TypeUnknown,
+ Value: config.UnknownVariableValue,
+ }
+}
+
+func unknownValue() string {
+ return hil.UnknownValue
+}
+
+func (i *Interpolater) valueModuleVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.ModuleVariable,
+ result map[string]ast.Variable) error {
+
+ // Build the path to the child module we want
+ path := make([]string, len(scope.Path), len(scope.Path)+1)
+ copy(path, scope.Path)
+ path = append(path, v.Name)
+
+ // Grab the lock so that if other interpolations are running or
+ // state is being modified, we'll be safe.
+ i.StateLock.RLock()
+ defer i.StateLock.RUnlock()
+
+ // Get the module where we're looking for the value
+ mod := i.State.ModuleByPath(path)
+ if mod == nil {
+ // If the module doesn't exist, we return the unknown variable value.
+ // This usually happens only in Refresh() when we haven't populated
+ // a state. During validation, we semantically verify that all
+ // modules reference other modules, and graph ordering should
+ // ensure that the module is in the state, so if we reach this
+ // point otherwise it really is a panic.
+ result[n] = unknownVariable()
+
+ // During apply this is always an error
+ if i.Operation == walkApply {
+ return fmt.Errorf(
+ "Couldn't find module %q for var: %s",
+ v.Name, v.FullKey())
+ }
+ } else {
+ // Get the value from the outputs
+ if outputState, ok := mod.Outputs[v.Field]; ok {
+ output, err := hil.InterfaceToVariable(outputState.Value)
+ if err != nil {
+ return err
+ }
+ result[n] = output
+ } else {
+ // Same reasons as the comment above.
+ result[n] = unknownVariable()
+
+ // During apply this is always an error
+ if i.Operation == walkApply {
+ return fmt.Errorf(
+ "Couldn't find output %q for module var: %s",
+ v.Field, v.FullKey())
+ }
+ }
+ }
+
+ return nil
+}
+
+func (i *Interpolater) valuePathVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.PathVariable,
+ result map[string]ast.Variable) error {
+ switch v.Type {
+ case config.PathValueCwd:
+ wd, err := os.Getwd()
+ if err != nil {
+ return fmt.Errorf(
+ "Couldn't get cwd for var %s: %s",
+ v.FullKey(), err)
+ }
+
+ result[n] = ast.Variable{
+ Value: wd,
+ Type: ast.TypeString,
+ }
+ case config.PathValueModule:
+ if t := i.Module.Child(scope.Path[1:]); t != nil {
+ result[n] = ast.Variable{
+ Value: t.Config().Dir,
+ Type: ast.TypeString,
+ }
+ }
+ case config.PathValueRoot:
+ result[n] = ast.Variable{
+ Value: i.Module.Config().Dir,
+ Type: ast.TypeString,
+ }
+ default:
+ return fmt.Errorf("%s: unknown path type: %#v", n, v.Type)
+ }
+
+ return nil
+
+}
+
+func (i *Interpolater) valueResourceVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.ResourceVariable,
+ result map[string]ast.Variable) error {
+ // During validate we can't compute dynamic fields yet, so resource
+ // variables are always treated as unknown.
+ if i.Operation == walkValidate {
+ result[n] = unknownVariable()
+ return nil
+ }
+
+ var variable *ast.Variable
+ var err error
+
+ if v.Multi && v.Index == -1 {
+ variable, err = i.computeResourceMultiVariable(scope, v)
+ } else {
+ variable, err = i.computeResourceVariable(scope, v)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ if variable == nil {
+ // During the input walk we tolerate missing variables because
+ // we haven't yet had a chance to refresh state, so dynamic data may
+ // not yet be complete.
+ // If it truly is missing, we'll catch it on a later walk.
+ // This applies only to graph nodes that interpolate during the
+ // config walk, e.g. providers.
+ if i.Operation == walkInput || i.Operation == walkRefresh {
+ result[n] = unknownVariable()
+ return nil
+ }
+
+ return fmt.Errorf("variable %q is nil, but no error was reported", v.Name)
+ }
+
+ result[n] = *variable
+ return nil
+}
+
+func (i *Interpolater) valueSelfVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.SelfVariable,
+ result map[string]ast.Variable) error {
+ if scope == nil || scope.Resource == nil {
+ return fmt.Errorf(
+ "%s: invalid scope, self variables are only valid on resources", n)
+ }
+
+ rv, err := config.NewResourceVariable(fmt.Sprintf(
+ "%s.%s.%d.%s",
+ scope.Resource.Type,
+ scope.Resource.Name,
+ scope.Resource.CountIndex,
+ v.Field))
+ if err != nil {
+ return err
+ }
+
+ return i.valueResourceVar(scope, n, rv, result)
+}
+
+func (i *Interpolater) valueSimpleVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.SimpleVariable,
+ result map[string]ast.Variable) error {
+ // This error message includes some information for people who
+ // relied on this for their template_file data sources. We should
+ // remove this at some point but there isn't any rush.
+ return fmt.Errorf(
+ "invalid variable syntax: %q. Did you mean 'var.%s'? If this is part of inline `template` parameter\n"+
+ "then you must escape the interpolation with two dollar signs. For\n"+
+ "example: ${a} becomes $${a}.",
+ n, n)
+}
+
+func (i *Interpolater) valueTerraformVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.TerraformVariable,
+ result map[string]ast.Variable) error {
+ if v.Field != "env" {
+ return fmt.Errorf(
+ "%s: only supported key for 'terraform.X' interpolations is 'env'", n)
+ }
+
+ if i.Meta == nil {
+ return fmt.Errorf(
+ "%s: internal error: nil Meta. Please report a bug.", n)
+ }
+
+ result[n] = ast.Variable{Type: ast.TypeString, Value: i.Meta.Env}
+ return nil
+}
+
+func (i *Interpolater) valueUserVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.UserVariable,
+ result map[string]ast.Variable) error {
+ i.VariableValuesLock.Lock()
+ defer i.VariableValuesLock.Unlock()
+ val, ok := i.VariableValues[v.Name]
+ if ok {
+ varValue, err := hil.InterfaceToVariable(val)
+ if err != nil {
+ return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s",
+ v.Name, val, err)
+ }
+ result[n] = varValue
+ return nil
+ }
+
+ if _, ok := result[n]; !ok && i.Operation == walkValidate {
+ result[n] = unknownVariable()
+ return nil
+ }
+
+ // Look up if we have any variables with this prefix because
+ // those are map overrides. Include those.
+ for k, val := range i.VariableValues {
+ if strings.HasPrefix(k, v.Name+".") {
+ keyComponents := strings.Split(k, ".")
+ overrideKey := keyComponents[len(keyComponents)-1]
+
+ mapInterface, ok := result["var."+v.Name]
+ if !ok {
+ return fmt.Errorf("override for non-existent variable: %s", v.Name)
+ }
+
+ mapVariable := mapInterface.Value.(map[string]ast.Variable)
+
+ varValue, err := hil.InterfaceToVariable(val)
+ if err != nil {
+ return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s",
+ v.Name, val, err)
+ }
+ mapVariable[overrideKey] = varValue
+ }
+ }
+
+ return nil
+}
+
+func (i *Interpolater) computeResourceVariable(
+ scope *InterpolationScope,
+ v *config.ResourceVariable) (*ast.Variable, error) {
+ id := v.ResourceId()
+ if v.Multi {
+ id = fmt.Sprintf("%s.%d", id, v.Index)
+ }
+
+ i.StateLock.RLock()
+ defer i.StateLock.RUnlock()
+
+ unknownVariable := unknownVariable()
+
+ // These variables must be declared early because of the use of GOTO
+ var isList bool
+ var isMap bool
+
+ // Get the information about this resource variable, and verify
+ // that it exists and such.
+ module, cr, err := i.resourceVariableInfo(scope, v)
+ if err != nil {
+ return nil, err
+ }
+
+ // If we're requesting "count", it's a special variable that we grab
+ // directly from the config itself.
+ if v.Field == "count" {
+ var count int
+ if cr != nil {
+ count, err = cr.Count()
+ } else {
+ count, err = i.resourceCountMax(module, cr, v)
+ }
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error reading %s count: %s",
+ v.ResourceId(),
+ err)
+ }
+
+ return &ast.Variable{Type: ast.TypeInt, Value: count}, nil
+ }
+
+ // Get the resource out from the state. We know the state exists
+ // at this point and if there is a state, we expect there to be a
+ // resource with the given name.
+ var r *ResourceState
+ if module != nil && len(module.Resources) > 0 {
+ var ok bool
+ r, ok = module.Resources[id]
+ if !ok && v.Multi && v.Index == 0 {
+ r, ok = module.Resources[v.ResourceId()]
+ }
+ if !ok {
+ r = nil
+ }
+ }
+ if r == nil || r.Primary == nil {
+ if i.Operation == walkApply || i.Operation == walkPlan {
+ return nil, fmt.Errorf(
+ "Resource '%s' not found for variable '%s'",
+ v.ResourceId(),
+ v.FullKey())
+ }
+
+ // If we have no module in the state, or it has no resources, return empty.
+ // NOTE(@mitchellh): I actually don't know why this is here. During
+ // a refactor I kept this here to maintain the same behavior, but
+ // I'm not sure why it's here.
+ if module == nil || len(module.Resources) == 0 {
+ return nil, nil
+ }
+
+ goto MISSING
+ }
+
+ if attr, ok := r.Primary.Attributes[v.Field]; ok {
+ v, err := hil.InterfaceToVariable(attr)
+ return &v, err
+ }
+
+ // computed list or map attribute
+ _, isList = r.Primary.Attributes[v.Field+".#"]
+ _, isMap = r.Primary.Attributes[v.Field+".%"]
+ if isList || isMap {
+ variable, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes)
+ return &variable, err
+ }
+
+ // At apply time, we can't do the "maybe has it" check below
+ // that we need for plans since parent elements might be computed.
+ // Therefore, it is an error and we're missing the key.
+ //
+ // TODO: test by creating a state and configuration that is referencing
+ // a non-existent variable "foo.bar" where the state only has "foo"
+ // and verify plan works, but apply doesn't.
+ if i.Operation == walkApply || i.Operation == walkDestroy {
+ goto MISSING
+ }
+
+ // We didn't find the exact field, so let's separate the dots
+ // and see if anything along the way is a computed set. i.e. if
+ // we have "foo.0.bar" as the field, check to see if "foo" is
+ // a computed list. If so, then the whole thing is computed.
+ if parts := strings.Split(v.Field, "."); len(parts) > 1 {
+ for i := 1; i < len(parts); i++ {
+ // Lists and sets produce a ".#" count key like this
+ key := fmt.Sprintf("%s.#", strings.Join(parts[:i], "."))
+ if attr, ok := r.Primary.Attributes[key]; ok {
+ v, err := hil.InterfaceToVariable(attr)
+ return &v, err
+ }
+
+ // Maps produce a plain prefix key like this
+ key = strings.Join(parts[:i], ".")
+ if attr, ok := r.Primary.Attributes[key]; ok {
+ v, err := hil.InterfaceToVariable(attr)
+ return &v, err
+ }
+ }
+ }
+
+MISSING:
+ // Validation for missing interpolations should happen at a higher
+ // semantic level. If we reached this point and don't have variables,
+ // just return the computed value.
+ if scope == nil || scope.Resource == nil {
+ return &unknownVariable, nil
+ }
+
+ // If the operation is refresh, it isn't an error for a value to
+ // be unknown. Instead, we return that the value is computed so
+ // that the graph can continue to refresh other nodes. It doesn't
+ // matter because the config isn't interpolated anyways.
+ //
+ // For a Destroy, we're also fine with computed values, since our goal is
+ // only to get destroy nodes for existing resources.
+ //
+ // For an input walk, computed values are okay to return because we're only
+ // looking for missing variables to prompt the user for.
+ if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput {
+ return &unknownVariable, nil
+ }
+
+ return nil, fmt.Errorf(
+ "Resource '%s' does not have attribute '%s' "+
+ "for variable '%s'",
+ id,
+ v.Field,
+ v.FullKey())
+}
+
+func (i *Interpolater) computeResourceMultiVariable(
+ scope *InterpolationScope,
+ v *config.ResourceVariable) (*ast.Variable, error) {
+ i.StateLock.RLock()
+ defer i.StateLock.RUnlock()
+
+ unknownVariable := unknownVariable()
+
+ // If we're only looking for input, we don't need to expand a
+ // multi-variable. This prevents us from encountering things that should be
+ // known but aren't because the state has yet to be refreshed.
+ if i.Operation == walkInput {
+ return &unknownVariable, nil
+ }
+
+ // Get the information about this resource variable, and verify
+ // that it exists and such.
+ module, cr, err := i.resourceVariableInfo(scope, v)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the keys for all the resources that are created for this resource
+ countMax, err := i.resourceCountMax(module, cr, v)
+ if err != nil {
+ return nil, err
+ }
+
+ // If count is zero, we return an empty list
+ if countMax == 0 {
+ return &ast.Variable{Type: ast.TypeList, Value: []ast.Variable{}}, nil
+ }
+
+ // If we have no module in the state, or it has no resources, return unknown
+ if module == nil || len(module.Resources) == 0 {
+ return &unknownVariable, nil
+ }
+
+ var values []interface{}
+ for idx := 0; idx < countMax; idx++ {
+ id := fmt.Sprintf("%s.%d", v.ResourceId(), idx)
+
+ // The first instance's ID may not have a trailing index. We try both
+ // here, but prefer a value without a trailing index when one exists.
+ // This choice is for legacy reasons: older versions of TF preferred it.
+ if id == v.ResourceId()+".0" {
+ potential := v.ResourceId()
+ if _, ok := module.Resources[potential]; ok {
+ id = potential
+ }
+ }
+
+ r, ok := module.Resources[id]
+ if !ok {
+ continue
+ }
+
+ if r.Primary == nil {
+ continue
+ }
+
+ if singleAttr, ok := r.Primary.Attributes[v.Field]; ok {
+ if singleAttr == config.UnknownVariableValue {
+ return &unknownVariable, nil
+ }
+
+ values = append(values, singleAttr)
+ continue
+ }
+
+ // computed list or map attribute
+ _, isList := r.Primary.Attributes[v.Field+".#"]
+ _, isMap := r.Primary.Attributes[v.Field+".%"]
+ if !(isList || isMap) {
+ continue
+ }
+ multiAttr, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes)
+ if err != nil {
+ return nil, err
+ }
+
+ if multiAttr == unknownVariable {
+ return &unknownVariable, nil
+ }
+
+ values = append(values, multiAttr)
+ }
+
+ if len(values) == 0 {
+ // If the operation is refresh, it isn't an error for a value to
+ // be unknown. Instead, we return that the value is computed so
+ // that the graph can continue to refresh other nodes. It doesn't
+ // matter because the config isn't interpolated anyways.
+ //
+ // For a Destroy, we're also fine with computed values, since our goal is
+ // only to get destroy nodes for existing resources.
+ //
+ // For an input walk, computed values are okay to return because we're only
+ // looking for missing variables to prompt the user for.
+ if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput {
+ return &unknownVariable, nil
+ }
+
+ return nil, fmt.Errorf(
+ "Resource '%s' does not have attribute '%s' "+
+ "for variable '%s'",
+ v.ResourceId(),
+ v.Field,
+ v.FullKey())
+ }
+
+ variable, err := hil.InterfaceToVariable(values)
+ return &variable, err
+}
+
+func (i *Interpolater) interpolateComplexTypeAttribute(
+ resourceID string,
+ attributes map[string]string) (ast.Variable, error) {
+
+ // We can now distinguish between lists and maps in state by the count field:
+ // - lists (and by extension, sets) use the traditional .# notation
+ // - maps use the newer .% notation
+ // Consequently here we can decide how to deal with the keys appropriately
+ // based on whether the type is a map or a list.
+ if lengthAttr, isList := attributes[resourceID+".#"]; isList {
+ log.Printf("[DEBUG] Interpolating computed list element attribute %s (%s)",
+ resourceID, lengthAttr)
+
+ // In Terraform's internal dotted representation of list-like attributes, the
+ // ".#" count field is marked as unknown to indicate "this whole list is
+ // unknown". We must honor that meaning here so computed references can be
+ // treated properly during the plan phase.
+ if lengthAttr == config.UnknownVariableValue {
+ return unknownVariable(), nil
+ }
+
+ expanded := flatmap.Expand(attributes, resourceID)
+ return hil.InterfaceToVariable(expanded)
+ }
+
+ if lengthAttr, isMap := attributes[resourceID+".%"]; isMap {
+ log.Printf("[DEBUG] Interpolating computed map element attribute %s (%s)",
+ resourceID, lengthAttr)
+
+ // In Terraform's internal dotted representation of map attributes, the
+ // ".%" count field is marked as unknown to indicate "this whole list is
+ // unknown". We must honor that meaning here so computed references can be
+ // treated properly during the plan phase.
+ if lengthAttr == config.UnknownVariableValue {
+ return unknownVariable(), nil
+ }
+
+ expanded := flatmap.Expand(attributes, resourceID)
+ return hil.InterfaceToVariable(expanded)
+ }
+
+ return ast.Variable{}, fmt.Errorf("No complex type %s found", resourceID)
+}
+
+func (i *Interpolater) resourceVariableInfo(
+ scope *InterpolationScope,
+ v *config.ResourceVariable) (*ModuleState, *config.Resource, error) {
+ // Get the module tree that contains our current path. This is
+ // either the current module (path is empty) or a child.
+ modTree := i.Module
+ if len(scope.Path) > 1 {
+ modTree = i.Module.Child(scope.Path[1:])
+ }
+
+ // Get the resource from the configuration so we can verify
+ // that the resource is in the configuration and so we can access
+ // the configuration if we need to.
+ var cr *config.Resource
+ for _, r := range modTree.Config().Resources {
+ if r.Id() == v.ResourceId() {
+ cr = r
+ break
+ }
+ }
+
+ // Get the relevant module
+ module := i.State.ModuleByPath(scope.Path)
+ return module, cr, nil
+}
+
+func (i *Interpolater) resourceCountMax(
+ ms *ModuleState,
+ cr *config.Resource,
+ v *config.ResourceVariable) (int, error) {
+ id := v.ResourceId()
+
+ // If we're NOT applying, then we assume we can read the count
+ // directly from the config. Plan and so on may not have any state
+ // yet, so we do a full interpolation of the configured count.
+ if i.Operation != walkApply {
+ if cr == nil {
+ return 0, nil
+ }
+
+ count, err := cr.Count()
+ if err != nil {
+ return 0, err
+ }
+
+ return count, nil
+ }
+
+ // We need to determine the list of resource keys to get values from.
+ // We used to use "cr.Count()", but that doesn't work when the count
+ // is interpolated and not yet known, so we instead derive the count
+ // from the keys present in the state.
+ max := -1
+ for k := range ms.Resources {
+ // Get the index number for this resource
+ index := ""
+ if k == id {
+ // If the key is the id, then it's just 0 (no explicit index)
+ index = "0"
+ } else if strings.HasPrefix(k, id+".") {
+ // Grab the index number out of the state
+ index = k[len(id+"."):]
+ if idx := strings.IndexRune(index, '.'); idx >= 0 {
+ index = index[:idx]
+ }
+ }
+
+ // If there was no index then this resource didn't match
+ // the one we're looking for; skip it.
+ if index == "" {
+ continue
+ }
+
+ // Turn the index into an int
+ raw, err := strconv.ParseInt(index, 0, 0)
+ if err != nil {
+ return 0, fmt.Errorf(
+ "%s: error parsing index %q as int: %s",
+ id, index, err)
+ }
+
+ // Keep track of this index if it's the max
+ if new := int(raw); new > max {
+ max = new
+ }
+ }
+
+ // If we never found any matching resources in the state, we
+ // have zero.
+ if max == -1 {
+ return 0, nil
+ }
+
+ // The result value is "max+1" because we're returning the
+ // max COUNT, not the max INDEX, and we zero-index.
+ return max + 1, nil
+}
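
Both interpolateComplexTypeAttribute and resourceCountMax above rely on Terraform's flattened state encoding: a list attribute is stored as a ".#" length key plus one key per index, and a map as a ".%" length key plus one key per map key. A reduced sketch of reconstructing a list from that encoding (flatmap.Expand performs a richer version of the same walk):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Terraform's state flattens a list attribute into dotted keys:
	// the ".#" key holds the length, and each element has an index key.
	attrs := map[string]string{
		"security_groups.#": "2",
		"security_groups.0": "sg-111",
		"security_groups.1": "sg-222",
	}

	n, err := strconv.Atoi(attrs["security_groups.#"])
	if err != nil {
		panic(err)
	}

	list := make([]string, 0, n)
	for i := 0; i < n; i++ {
		list = append(list, attrs["security_groups."+strconv.Itoa(i)])
	}
	fmt.Println(list) // [sg-111 sg-222]
}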
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
new file mode 100644
index 00000000..bd32c79f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
@@ -0,0 +1,14 @@
+package terraform
+
+// NodeCountBoundary fixes any "count boundaries" in the state: resources
+// that are named "foo.0" when they should be named "foo".
+type NodeCountBoundary struct{}
+
+func (n *NodeCountBoundary) Name() string {
+ return "meta.count-boundary (count boundary fixup)"
+}
+
+// GraphNodeEvalable
+func (n *NodeCountBoundary) EvalTree() EvalNode {
+ return &EvalCountFixZeroOneBoundaryGlobal{}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
new file mode 100644
index 00000000..e32cea88
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
@@ -0,0 +1,22 @@
+package terraform
+
+// NodeDestroyableDataResource represents a data resource that can be
+// destroyed: destroying it simply removes its entry from the state.
+type NodeDestroyableDataResource struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodeDestroyableDataResource) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Just destroy it.
+ var state *InstanceState
+ return &EvalWriteState{
+ Name: stateId,
+ State: &state, // state is nil here
+ }
+}
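
Destroying a data resource needs no provider call: writing a nil *InstanceState under the resource's state ID removes its entry. A sketch of that convention modeled on a plain map (EvalWriteState's real behavior against the state tree is assumed to be analogous):

package main

import "fmt"

// InstanceState stands in for Terraform's real type; only the shape of
// the operation matters for this sketch.
type InstanceState struct{ ID string }

// writeState models EvalWriteState against a plain map: a nil state
// removes the entry, a non-nil state stores it.
func writeState(store map[string]*InstanceState, name string, state *InstanceState) {
	if state == nil {
		delete(store, name)
		return
	}
	store[name] = state
}

func main() {
	store := map[string]*InstanceState{
		"data.template_file.init": {ID: "d-123"},
	}

	var state *InstanceState // nil, as in NodeDestroyableDataResource
	writeState(store, "data.template_file.init", state)

	fmt.Println(len(store)) // 0: the data resource is gone from the state
}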
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
new file mode 100644
index 00000000..d504c892
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
@@ -0,0 +1,198 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// NodeRefreshableDataResource represents a data resource that is
+// refreshable: it can be read and its state updated during a walk.
+type NodeRefreshableDataResource struct {
+ *NodeAbstractCountResource
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+ // Grab the state which we read
+ state, lock := ctx.State()
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Expand the resource count which must be available by now from EvalTree
+ count, err := n.Config.Count()
+ if err != nil {
+ return nil, err
+ }
+
+ // The concrete resource factory we'll use
+ concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+ // Add the config and state since we don't do that via transforms
+ a.Config = n.Config
+
+ return &NodeRefreshableDataResourceInstance{
+ NodeAbstractResource: a,
+ }
+ }
+
+ // Start creating the steps
+ steps := []GraphTransformer{
+ // Expand the count.
+ &ResourceCountTransformer{
+ Concrete: concreteResource,
+ Count: count,
+ Addr: n.ResourceAddr(),
+ },
+
+ // Attach the state
+ &AttachStateTransformer{State: state},
+
+ // Targeting
+ &TargetsTransformer{ParsedTargets: n.Targets},
+
+ // Connect references so ordering is correct
+ &ReferenceTransformer{},
+
+ // Make sure there is a single root
+ &RootTransformer{},
+ }
+
+ // Build the graph
+ b := &BasicGraphBuilder{
+ Steps: steps,
+ Validate: true,
+ Name: "NodeRefreshableDataResource",
+ }
+
+ return b.Build(ctx.Path())
+}
+
+// NodeRefreshableDataResourceInstance represents a _single_ resource instance
+// that is refreshable.
+type NodeRefreshableDataResourceInstance struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ }
+
+ // Get the state if we have it, if not we build it
+ rs := n.ResourceState
+ if rs == nil {
+ rs = &ResourceState{}
+ }
+
+ // If we have a config, rebuild the resource state from it
+ if n.Config != nil {
+ rs = &ResourceState{
+ Type: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: n.StateReferences(),
+ }
+ }
+
+ // Build the resource for eval
+ resource := &Resource{
+ Name: addr.Name,
+ Type: addr.Type,
+ CountIndex: addr.Index,
+ }
+ if resource.CountIndex < 0 {
+ resource.CountIndex = 0
+ }
+
+ // Declare a bunch of variables that are used for state during
+ // evaluation. Most of these are written to by address below.
+ var config *ResourceConfig
+ var diff *InstanceDiff
+ var provider ResourceProvider
+ var state *InstanceState
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ // Always destroy the existing state first, since we must
+ // make sure that values from a previous read will not
+ // get interpolated if we end up needing to defer our
+ // loading until apply time.
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: rs.Type,
+ Provider: rs.Provider,
+ Dependencies: rs.Dependencies,
+ State: &state, // state is nil here
+ },
+
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &config,
+ },
+
+ // The rest of this pass can proceed only if there are no
+ // computed values in our config.
+ // (If there are, we'll deal with this during the plan and
+ // apply phases.)
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if len(config.ComputedKeys) > 0 {
+ return true, EvalEarlyExitError{}
+ }
+
+ // If the config explicitly has a depends_on for this
+ // data source, assume the intention is to prevent
+ // refreshing ahead of that dependency.
+ if len(n.Config.DependsOn) > 0 {
+ return true, EvalEarlyExitError{}
+ }
+
+ return true, nil
+ },
+
+ Then: EvalNoop{},
+ },
+
+ // The remainder of this pass is the same as running
+ // a "plan" pass immediately followed by an "apply" pass,
+ // populating the state early so it'll be available to
+ // provider configurations that need this data during
+ // refresh/plan.
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+
+ &EvalReadDataDiff{
+ Info: info,
+ Config: &config,
+ Provider: &provider,
+ Output: &diff,
+ OutputState: &state,
+ },
+
+ &EvalReadDataApply{
+ Info: info,
+ Diff: &diff,
+ Provider: &provider,
+ Output: &state,
+ },
+
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: rs.Type,
+ Provider: rs.Provider,
+ Dependencies: rs.Dependencies,
+ State: &state,
+ },
+
+ &EvalUpdateStateHook{},
+ },
+ }
+}
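
The EvalIf condition above returns EvalEarlyExitError{} to abandon the rest of the sequence when the read must be deferred, and the walker is assumed to treat that error type as a clean stop rather than a failure. A reduced sketch of that early-exit convention, with stand-in eval types:

package main

import "fmt"

// EvalEarlyExitError signals "stop this sequence, but it's not a failure".
type EvalEarlyExitError struct{}

func (EvalEarlyExitError) Error() string { return "early exit" }

type EvalNode interface{ Eval() error }

type step struct{ name string }

func (s step) Eval() error { fmt.Println("ran", s.name); return nil }

type deferIfComputed struct{ computed bool }

func (d deferIfComputed) Eval() error {
	if d.computed {
		return EvalEarlyExitError{} // defer the read until apply time
	}
	return nil
}

// evalSequence stops at the first error, swallowing the early-exit
// sentinel the way Terraform's walker does.
func evalSequence(nodes []EvalNode) error {
	for _, n := range nodes {
		if err := n.Eval(); err != nil {
			if _, ok := err.(EvalEarlyExitError); ok {
				return nil
			}
			return err
		}
	}
	return nil
}

func main() {
	nodes := []EvalNode{
		step{"interpolate config"},
		deferIfComputed{computed: true}, // config has computed keys
		step{"read data source"},        // skipped
	}
	if err := evalSequence(nodes); err != nil {
		fmt.Println("error:", err)
	}
}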
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
new file mode 100644
index 00000000..319df1e3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
@@ -0,0 +1,29 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// NodeDestroyableModuleVariable represents a module variable during destroy.
+type NodeDestroyableModuleVariable struct {
+ PathValue []string
+}
+
+func (n *NodeDestroyableModuleVariable) Name() string {
+ result := "plan-destroy"
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeDestroyableModuleVariable) Path() []string {
+ return n.PathValue
+}
+
+// GraphNodeEvalable
+func (n *NodeDestroyableModuleVariable) EvalTree() EvalNode {
+ return &EvalDiffDestroyModule{Path: n.PathValue}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
new file mode 100644
index 00000000..13fe8fc3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
@@ -0,0 +1,125 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// NodeApplyableModuleVariable represents a module variable input during
+// the apply step.
+type NodeApplyableModuleVariable struct {
+ PathValue []string
+ Config *config.Variable // Config is the var in the config
+ Value *config.RawConfig // Value is the value that is set
+
+ Module *module.Tree // Antiquated, want to remove
+}
+
+func (n *NodeApplyableModuleVariable) Name() string {
+ result := fmt.Sprintf("var.%s", n.Config.Name)
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeApplyableModuleVariable) Path() []string {
+ // We execute in the parent scope (above our own module) so that
+ // we can access the proper interpolations.
+ if len(n.PathValue) > 2 {
+ return n.PathValue[:len(n.PathValue)-1]
+ }
+
+ return rootModulePath
+}
+
+// RemovableIfNotTargeted
+func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool {
+ // We need to add this so that this node will be removed if
+ // it isn't targeted or a dependency of a target.
+ return true
+}
+
+// GraphNodeReferenceGlobal
+func (n *NodeApplyableModuleVariable) ReferenceGlobal() bool {
+ // We have to create fully qualified references because we cross
+ // boundaries here: our ReferenceableName is in one path and our
+ // References are from another path.
+ return true
+}
+
+// GraphNodeReferenceable
+func (n *NodeApplyableModuleVariable) ReferenceableName() []string {
+ return []string{n.Name()}
+}
+
+// GraphNodeReferencer
+func (n *NodeApplyableModuleVariable) References() []string {
+ // If we have no value set, we depend on nothing
+ if n.Value == nil {
+ return nil
+ }
+
+ // Can't depend on anything if we're in the root
+ if len(n.PathValue) < 2 {
+ return nil
+ }
+
+ // Otherwise, we depend on anything that is in our value, but
+ // specifically in the namespace of the parent path.
+ // Create the prefix based on the path
+ var prefix string
+ if p := n.Path(); len(p) > 0 {
+ prefix = modulePrefixStr(p)
+ }
+
+ result := ReferencesFromConfig(n.Value)
+ return modulePrefixList(result, prefix)
+}
+
+// GraphNodeEvalable
+func (n *NodeApplyableModuleVariable) EvalTree() EvalNode {
+ // If we have no value, do nothing
+ if n.Value == nil {
+ return &EvalNoop{}
+ }
+
+ // Otherwise, interpolate the value of this variable and set it
+ // within the variables mapping.
+ var config *ResourceConfig
+ variables := make(map[string]interface{})
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalInterpolate{
+ Config: n.Value,
+ Output: &config,
+ },
+
+ &EvalVariableBlock{
+ Config: &config,
+ VariableValues: variables,
+ },
+
+ &EvalCoerceMapVariable{
+ Variables: variables,
+ ModulePath: n.PathValue,
+ ModuleTree: n.Module,
+ },
+
+ &EvalTypeCheckVariable{
+ Variables: variables,
+ ModulePath: n.PathValue,
+ ModuleTree: n.Module,
+ },
+
+ &EvalSetVariables{
+ Module: &n.PathValue[len(n.PathValue)-1],
+ Variables: variables,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
new file mode 100644
index 00000000..e28e6f02
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
@@ -0,0 +1,76 @@
+package terraform
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeApplyableOutput represents an output that is "applyable":
+// it is ready to be applied.
+type NodeApplyableOutput struct {
+ PathValue []string
+ Config *config.Output // Config is the output in the config
+}
+
+func (n *NodeApplyableOutput) Name() string {
+ result := fmt.Sprintf("output.%s", n.Config.Name)
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeApplyableOutput) Path() []string {
+ return n.PathValue
+}
+
+// RemovableIfNotTargeted
+func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool {
+ // We need to add this so that this node will be removed if
+ // it isn't targeted or a dependency of a target.
+ return true
+}
+
+// GraphNodeReferenceable
+func (n *NodeApplyableOutput) ReferenceableName() []string {
+ name := fmt.Sprintf("output.%s", n.Config.Name)
+ return []string{name}
+}
+
+// GraphNodeReferencer
+func (n *NodeApplyableOutput) References() []string {
+ var result []string
+ result = append(result, n.Config.DependsOn...)
+ result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
+ for _, v := range result {
+ split := strings.Split(v, "/")
+ for i, s := range split {
+ split[i] = s + ".destroy"
+ }
+
+ result = append(result, strings.Join(split, "/"))
+ }
+
+ return result
+}
+
+// GraphNodeEvalable
+func (n *NodeApplyableOutput) EvalTree() EvalNode {
+ return &EvalOpFilter{
+ Ops: []walkOperation{walkRefresh, walkPlan, walkApply,
+ walkDestroy, walkInput, walkValidate},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalWriteOutput{
+ Name: n.Config.Name,
+ Sensitive: n.Config.Sensitive,
+ Value: n.Config.RawConfig,
+ },
+ },
+ },
+ }
+}
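
The References loop above gives every dependency a ".destroy"-suffixed twin, suffixing each "/"-separated segment, so the output is also ordered correctly against destroy nodes. A quick sketch of that expansion in isolation:

package main

import (
	"fmt"
	"strings"
)

// destroyRefs mirrors the loop in NodeApplyableOutput.References: every
// reference gains a ".destroy"-suffixed twin.
func destroyRefs(refs []string) []string {
	result := append([]string(nil), refs...)
	for _, v := range refs {
		split := strings.Split(v, "/")
		for i, s := range split {
			split[i] = s + ".destroy"
		}
		result = append(result, strings.Join(split, "/"))
	}
	return result
}

func main() {
	fmt.Println(destroyRefs([]string{"aws_instance.web"}))
	// [aws_instance.web aws_instance.web.destroy]
}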
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
new file mode 100644
index 00000000..636a15df
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
@@ -0,0 +1,35 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// NodeOutputOrphan represents an output that is an orphan.
+type NodeOutputOrphan struct {
+ OutputName string
+ PathValue []string
+}
+
+func (n *NodeOutputOrphan) Name() string {
+ result := fmt.Sprintf("output.%s (orphan)", n.OutputName)
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeOutputOrphan) Path() []string {
+ return n.PathValue
+}
+
+// GraphNodeEvalable
+func (n *NodeOutputOrphan) EvalTree() EvalNode {
+ return &EvalOpFilter{
+ Ops: []walkOperation{walkRefresh, walkApply, walkDestroy},
+ Node: &EvalDeleteOutput{
+ Name: n.OutputName,
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go
new file mode 100644
index 00000000..8e2c176f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go
@@ -0,0 +1,11 @@
+package terraform
+
+// NodeApplyableProvider represents a provider during an apply.
+type NodeApplyableProvider struct {
+ *NodeAbstractProvider
+}
+
+// GraphNodeEvalable
+func (n *NodeApplyableProvider) EvalTree() EvalNode {
+ return ProviderEvalTree(n.NameValue, n.ProviderConfig())
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
new file mode 100644
index 00000000..6cc83656
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
@@ -0,0 +1,85 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ConcreteProviderNodeFunc is a callback type used to convert an
+// abstract provider to a concrete one of some type.
+type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex
+
+// NodeAbstractProvider represents a provider that has no associated operations.
+// It registers all the common interfaces across operations for providers.
+type NodeAbstractProvider struct {
+ NameValue string
+ PathValue []string
+
+ // The fields below will be automatically set using the Attach
+ // interfaces if you're running those transforms, but may also be
+ // explicitly set if you already have that information.
+
+ Config *config.ProviderConfig
+}
+
+func (n *NodeAbstractProvider) Name() string {
+ result := fmt.Sprintf("provider.%s", n.NameValue)
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeAbstractProvider) Path() []string {
+ return n.PathValue
+}
+
+// RemovableIfNotTargeted
+func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool {
+ // We need to add this so that this node will be removed if
+ // it isn't targeted or a dependency of a target.
+ return true
+}
+
+// GraphNodeReferencer
+func (n *NodeAbstractProvider) References() []string {
+ if n.Config == nil {
+ return nil
+ }
+
+ return ReferencesFromConfig(n.Config.RawConfig)
+}
+
+// GraphNodeProvider
+func (n *NodeAbstractProvider) ProviderName() string {
+ return n.NameValue
+}
+
+// GraphNodeProvider
+func (n *NodeAbstractProvider) ProviderConfig() *config.RawConfig {
+ if n.Config == nil {
+ return nil
+ }
+
+ return n.Config.RawConfig
+}
+
+// GraphNodeAttachProvider
+func (n *NodeAbstractProvider) AttachProvider(c *config.ProviderConfig) {
+ n.Config = c
+}
+
+// GraphNodeDotter impl.
+func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+ return &dag.DotNode{
+ Name: name,
+ Attrs: map[string]string{
+ "label": n.Name(),
+ "shape": "diamond",
+ },
+ }
+}
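
DotNode above controls how a provider renders in `terraform graph` output. Assuming the dag package serializes each node's Attrs as graphviz attributes (an assumption; the actual writer lives in the dag package and is not part of this diff), the emitted fragment would look roughly like this sketch:

package main

import "fmt"

func main() {
	name := "provider.aws"
	attrs := map[string]string{"label": "provider.aws", "shape": "diamond"}

	// A provider node drawn as a diamond in the rendered graph.
	fmt.Println("digraph {")
	fmt.Printf("  %q [label=%q, shape=%q]\n", name, attrs["label"], attrs["shape"])
	fmt.Println("}")
}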
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
new file mode 100644
index 00000000..25e7e620
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
@@ -0,0 +1,38 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// NodeDisabledProvider represents a provider that is disabled. A disabled
+// provider does nothing. It exists to properly set inheritance information
+// for child providers.
+type NodeDisabledProvider struct {
+ *NodeAbstractProvider
+}
+
+func (n *NodeDisabledProvider) Name() string {
+ return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name())
+}
+
+// GraphNodeEvalable
+func (n *NodeDisabledProvider) EvalTree() EvalNode {
+ var resourceConfig *ResourceConfig
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalInterpolate{
+ Config: n.ProviderConfig(),
+ Output: &resourceConfig,
+ },
+ &EvalBuildProviderConfig{
+ Provider: n.ProviderName(),
+ Config: &resourceConfig,
+ Output: &resourceConfig,
+ },
+ &EvalSetProviderConfig{
+ Provider: n.ProviderName(),
+ Config: &resourceConfig,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
new file mode 100644
index 00000000..bb117c1d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
@@ -0,0 +1,44 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeProvisioner represents a provisioner that has no associated operations.
+// It registers all the common interfaces across operations for provisioners.
+type NodeProvisioner struct {
+ NameValue string
+ PathValue []string
+
+ // The fields below will be automatically set using the Attach
+ // interfaces if you're running those transforms, but may also be explicitly
+ // set if you already have that information.
+
+ Config *config.ProviderConfig
+}
+
+func (n *NodeProvisioner) Name() string {
+ result := fmt.Sprintf("provisioner.%s", n.NameValue)
+ if len(n.PathValue) > 1 {
+ result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+ }
+
+ return result
+}
+
+// GraphNodeSubPath
+func (n *NodeProvisioner) Path() []string {
+ return n.PathValue
+}
+
+// GraphNodeProvisioner
+func (n *NodeProvisioner) ProvisionerName() string {
+ return n.NameValue
+}
+
+// GraphNodeEvalable impl.
+func (n *NodeProvisioner) EvalTree() EvalNode {
+ return &EvalInitProvisioner{Name: n.NameValue}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
new file mode 100644
index 00000000..50bb7079
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
@@ -0,0 +1,240 @@
+package terraform
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ConcreteResourceNodeFunc is a callback type used to convert an
+// abstract resource to a concrete one of some type.
+type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex
+
+// GraphNodeResource is implemented by any nodes that represent a resource.
+// The type of operation cannot be assumed, only that this node represents
+// the given resource.
+type GraphNodeResource interface {
+ ResourceAddr() *ResourceAddress
+}
+
+// NodeAbstractResource represents a resource that has no associated
+// operations. It registers all the interfaces for a resource that are common
+// across multiple operation types.
+type NodeAbstractResource struct {
+ Addr *ResourceAddress // Addr is the address for this resource
+
+ // The fields below will be automatically set using the Attach
+ // interfaces if you're running those transforms, but may also be explicitly
+ // set if you already have that information.
+
+ Config *config.Resource // Config is the resource in the config
+ ResourceState *ResourceState // ResourceState is the ResourceState for this
+
+ Targets []ResourceAddress // Set from GraphNodeTargetable
+}
+
+func (n *NodeAbstractResource) Name() string {
+ return n.Addr.String()
+}
+
+// GraphNodeSubPath
+func (n *NodeAbstractResource) Path() []string {
+ return n.Addr.Path
+}
+
+// GraphNodeReferenceable
+func (n *NodeAbstractResource) ReferenceableName() []string {
+ // We are always referenceable as "type.name" as long as
+ // we have a config or address. Determine what that value is.
+ var id string
+ if n.Config != nil {
+ id = n.Config.Id()
+ } else if n.Addr != nil {
+ addrCopy := n.Addr.Copy()
+ addrCopy.Path = nil // ReferenceTransformer handles paths
+ addrCopy.Index = -1 // We handle indexes below
+ id = addrCopy.String()
+ } else {
+ // No way to determine our type.name, just return
+ return nil
+ }
+
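+ // As an illustration, a resource "null_resource.a" with index 1 yields:
+ // "null_resource.a", "null_resource.a.*", and "null_resource.a.1".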
+ var result []string
+
+ // Always include our own ID. This is primarily for backwards
+ // compatibility with states that didn't yet support the more
+ // specific dep string.
+ result = append(result, id)
+
+ // We also represent all multi-access ("splat") references
+ result = append(result, fmt.Sprintf("%s.*", id))
+
+ // We represent either a specific number, or all numbers
+ suffix := "N"
+ if n.Addr != nil {
+ idx := n.Addr.Index
+ if idx == -1 {
+ idx = 0
+ }
+
+ suffix = fmt.Sprintf("%d", idx)
+ }
+ result = append(result, fmt.Sprintf("%s.%s", id, suffix))
+
+ return result
+}
+
+// GraphNodeReferencer
+func (n *NodeAbstractResource) References() []string {
+ // If we have a config, that is our source of truth
+ if c := n.Config; c != nil {
+ // Grab all the references
+ var result []string
+ result = append(result, c.DependsOn...)
+ result = append(result, ReferencesFromConfig(c.RawCount)...)
+ result = append(result, ReferencesFromConfig(c.RawConfig)...)
+ for _, p := range c.Provisioners {
+ if p.When == config.ProvisionerWhenCreate {
+ result = append(result, ReferencesFromConfig(p.ConnInfo)...)
+ result = append(result, ReferencesFromConfig(p.RawConfig)...)
+ }
+ }
+
+ return uniqueStrings(result)
+ }
+
+ // If we have state, that is our next source
+ if s := n.ResourceState; s != nil {
+ return s.Dependencies
+ }
+
+ return nil
+}
+
+// StateReferences returns the dependencies to put into the state for
+// this resource.
+func (n *NodeAbstractResource) StateReferences() []string {
+ self := n.ReferenceableName()
+
+ // Determine what our "prefix" is for checking for references to
+ // ourself.
+ addrCopy := n.Addr.Copy()
+ addrCopy.Index = -1
+ selfPrefix := addrCopy.String() + "."
+
+ depsRaw := n.References()
+ deps := make([]string, 0, len(depsRaw))
+ for _, d := range depsRaw {
+ // Ignore any variable dependencies
+ if strings.HasPrefix(d, "var.") {
+ continue
+ }
+
+ // If this has a backup ref, ignore those for now. The old state
+ // file never contained those and I'd rather store the rich types we
+ // add in the future.
+ if idx := strings.IndexRune(d, '/'); idx != -1 {
+ d = d[:idx]
+ }
+
+ // If we're referencing ourself, then ignore it
+ found := false
+ for _, s := range self {
+ if d == s {
+ found = true
+ }
+ }
+ if found {
+ continue
+ }
+
+ // If this is a reference to ourself and a specific index, we keep
+ // it. For example, if this resource is "foo.bar" and the reference
+ // is "foo.bar.0" then we keep it exact. Otherwise, we strip it.
+ if strings.HasSuffix(d, ".0") && !strings.HasPrefix(d, selfPrefix) {
+ d = d[:len(d)-2]
+ }
+
+ // This is sad. The dependencies are currently in the format of
+ // "module.foo.bar" (the full field). This strips the field off.
+ if strings.HasPrefix(d, "module.") {
+ parts := strings.SplitN(d, ".", 3)
+ d = strings.Join(parts[0:2], ".")
+ }
+
+ deps = append(deps, d)
+ }
+
+ return deps
+}
+
+// GraphNodeProviderConsumer
+func (n *NodeAbstractResource) ProvidedBy() []string {
+ // If we have a config we prefer that above all else
+ if n.Config != nil {
+ return []string{resourceProvider(n.Config.Type, n.Config.Provider)}
+ }
+
+ // If we have state, then we will use the provider from there
+ if n.ResourceState != nil && n.ResourceState.Provider != "" {
+ return []string{n.ResourceState.Provider}
+ }
+
+ // Use our type
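+ // (resourceProvider derives the provider from the type prefix, so an
+ // "aws_instance" resource maps to the "aws" provider.)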
+ return []string{resourceProvider(n.Addr.Type, "")}
+}
+
+// GraphNodeProvisionerConsumer
+func (n *NodeAbstractResource) ProvisionedBy() []string {
+ // If we have no configuration, then we have no provisioners
+ if n.Config == nil {
+ return nil
+ }
+
+ // Build the list of provisioners we need based on the configuration.
+ // It is okay to have duplicates here.
+ result := make([]string, len(n.Config.Provisioners))
+ for i, p := range n.Config.Provisioners {
+ result[i] = p.Type
+ }
+
+ return result
+}
+
+// GraphNodeResource, GraphNodeAttachResourceState
+func (n *NodeAbstractResource) ResourceAddr() *ResourceAddress {
+ return n.Addr
+}
+
+// GraphNodeAddressable, TODO: remove, used by target, should unify
+func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress {
+ return n.ResourceAddr()
+}
+
+// GraphNodeTargetable
+func (n *NodeAbstractResource) SetTargets(targets []ResourceAddress) {
+ n.Targets = targets
+}
+
+// GraphNodeAttachResourceState
+func (n *NodeAbstractResource) AttachResourceState(s *ResourceState) {
+ n.ResourceState = s
+}
+
+// GraphNodeAttachResourceConfig
+func (n *NodeAbstractResource) AttachResourceConfig(c *config.Resource) {
+ n.Config = c
+}
+
+// GraphNodeDotter impl.
+func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+ return &dag.DotNode{
+ Name: name,
+ Attrs: map[string]string{
+ "label": n.Name(),
+ "shape": "box",
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
new file mode 100644
index 00000000..573570d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
@@ -0,0 +1,50 @@
+package terraform
+
+// NodeAbstractCountResource should be embedded instead of NodeAbstractResource
+// if the resource has a `count` value that needs to be expanded.
+//
+// The embedder should implement `DynamicExpand` to process the count.
+type NodeAbstractCountResource struct {
+ *NodeAbstractResource
+
+ // Validate, if true, will perform the validation for the count.
+ // This should only be turned on for the "validate" operation.
+ Validate bool
+}
+
+// GraphNodeEvalable
+func (n *NodeAbstractCountResource) EvalTree() EvalNode {
+ // We only check if the count is computed if we're not validating.
+ // If we're validating we allow computed counts since they just turn
+ // into more computed values.
+ var evalCountCheckComputed EvalNode
+ if !n.Validate {
+ evalCountCheckComputed = &EvalCountCheckComputed{Resource: n.Config}
+ }
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ // The EvalTree for a plannable resource primarily involves
+ // interpolating the count since it can contain variables
+ // we only just received access to.
+ //
+ // With the interpolated count, we can then DynamicExpand
+ // into the proper number of instances.
+ &EvalInterpolate{Config: n.Config.RawCount},
+
+ // Check if the count is computed
+ evalCountCheckComputed,
+
+ // If validation is enabled, perform the validation
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ return n.Validate, nil
+ },
+
+ Then: &EvalValidateCount{Resource: n.Config},
+ },
+
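+ // Fix up the state when the count moves across the 0/1 boundary,
+ // so that "foo" and "foo.0" entries stay consistent.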
+ &EvalCountFixZeroOneBoundary{Resource: n.Config},
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
new file mode 100644
index 00000000..3599782b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
@@ -0,0 +1,357 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeApplyableResource represents a resource that is "applyable":
+// it is ready to be applied and is represented by a diff.
+type NodeApplyableResource struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeCreator
+func (n *NodeApplyableResource) CreateAddr() *ResourceAddress {
+ return n.NodeAbstractResource.Addr
+}
+
+// GraphNodeReferencer, overriding NodeAbstractResource
+func (n *NodeApplyableResource) References() []string {
+ result := n.NodeAbstractResource.References()
+
+ // The "apply" side of a resource generally also depends on the
+ // destruction of its dependencies as well. For example, if a LB
+ // references a set of VMs with ${vm.foo.*.id}, then we must wait for
+ // the destruction so we get the newly updated list of VMs.
+ //
+ // The exception here is CBD. When CBD is set, we don't do this since
+ // it would create a cycle. By not creating a cycle, we require two
+ // applies since the first apply the creation step will use the OLD
+ // values (pre-destroy) and the second step will update.
+ //
+ // This is how Terraform behaved with "legacy" graphs (TF <= 0.7.x).
+ // We mimic that behavior here now and can improve upon it in the future.
+ //
+ // This behavior is tested in graph_build_apply_test.go to test ordering.
+ cbd := n.Config != nil && n.Config.Lifecycle.CreateBeforeDestroy
+ if !cbd {
+ // The "apply" side of a resource always depends on the destruction
+ // of all its dependencies in addition to the creation.
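+ // e.g. a reference to "aws_instance.web" also adds a dependency on
+ // "aws_instance.web.destroy" (names illustrative).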
+ for _, v := range result {
+ result = append(result, v+".destroy")
+ }
+ }
+
+ return result
+}
+
+// GraphNodeEvalable
+func (n *NodeApplyableResource) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ }
+
+ // Build the resource for eval
+ resource := &Resource{
+ Name: addr.Name,
+ Type: addr.Type,
+ CountIndex: addr.Index,
+ }
+ if resource.CountIndex < 0 {
+ resource.CountIndex = 0
+ }
+
+ // Determine the dependencies for the state.
+ stateDeps := n.StateReferences()
+
+ // Eval info is different depending on what kind of resource this is
+ switch n.Config.Mode {
+ case config.ManagedResourceMode:
+ return n.evalTreeManagedResource(
+ stateId, info, resource, stateDeps,
+ )
+ case config.DataResourceMode:
+ return n.evalTreeDataResource(
+ stateId, info, resource, stateDeps)
+ default:
+ panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
+ }
+}
+
+func (n *NodeApplyableResource) evalTreeDataResource(
+ stateId string, info *InstanceInfo,
+ resource *Resource, stateDeps []string) EvalNode {
+ var provider ResourceProvider
+ var config *ResourceConfig
+ var diff *InstanceDiff
+ var state *InstanceState
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ // Build the instance info
+ &EvalInstanceInfo{
+ Info: info,
+ },
+
+ // Get the saved diff for apply
+ &EvalReadDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+
+ // Stop here if we don't actually have a diff
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if diff == nil {
+ return true, EvalEarlyExitError{}
+ }
+
+ if diff.GetAttributesLen() == 0 {
+ return true, EvalEarlyExitError{}
+ }
+
+ return true, nil
+ },
+ Then: EvalNoop{},
+ },
+
+ // We need to re-interpolate the config here, rather than
+ // just using the diff's values directly, because we've
+ // potentially learned more variable values during the
+ // apply pass that weren't known when the diff was produced.
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &config,
+ },
+
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+
+ // Make a new diff with our newly-interpolated config.
+ &EvalReadDataDiff{
+ Info: info,
+ Config: &config,
+ Previous: &diff,
+ Provider: &provider,
+ Output: &diff,
+ },
+
+ &EvalReadDataApply{
+ Info: info,
+ Diff: &diff,
+ Provider: &provider,
+ Output: &state,
+ },
+
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: stateDeps,
+ State: &state,
+ },
+
+ // Clear the diff now that we've applied it, so
+ // later nodes won't see a diff that's now a no-op.
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: nil,
+ },
+
+ &EvalUpdateStateHook{},
+ },
+ }
+}
+
+func (n *NodeApplyableResource) evalTreeManagedResource(
+ stateId string, info *InstanceInfo,
+ resource *Resource, stateDeps []string) EvalNode {
+ // Declare a bunch of variables that are used for state during
+ // evaluation. Most of these are written to by address below.
+ var provider ResourceProvider
+ var diff, diffApply *InstanceDiff
+ var state *InstanceState
+ var resourceConfig *ResourceConfig
+ var err error
+ var createNew bool
+ var createBeforeDestroyEnabled bool
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ // Build the instance info
+ &EvalInstanceInfo{
+ Info: info,
+ },
+
+ // Get the saved diff for apply
+ &EvalReadDiff{
+ Name: stateId,
+ Diff: &diffApply,
+ },
+
+ // We don't want to do any destroys
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if diffApply == nil {
+ return true, EvalEarlyExitError{}
+ }
+
+ if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 {
+ return true, EvalEarlyExitError{}
+ }
+
+ diffApply.SetDestroy(false)
+ return true, nil
+ },
+ Then: EvalNoop{},
+ },
+
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ destroy := false
+ if diffApply != nil {
+ destroy = diffApply.GetDestroy() || diffApply.RequiresNew()
+ }
+
+ createBeforeDestroyEnabled =
+ n.Config.Lifecycle.CreateBeforeDestroy &&
+ destroy
+
+ return createBeforeDestroyEnabled, nil
+ },
+ Then: &EvalDeposeState{
+ Name: stateId,
+ },
+ },
+
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &resourceConfig,
+ },
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ // Re-run validation to catch any errors we missed, e.g. type
+ // mismatches on computed values.
+ &EvalValidateResource{
+ Provider: &provider,
+ Config: &resourceConfig,
+ ResourceName: n.Config.Name,
+ ResourceType: n.Config.Type,
+ ResourceMode: n.Config.Mode,
+ IgnoreWarnings: true,
+ },
+ &EvalDiff{
+ Info: info,
+ Config: &resourceConfig,
+ Resource: n.Config,
+ Provider: &provider,
+ Diff: &diffApply,
+ State: &state,
+ OutputDiff: &diffApply,
+ },
+
+ // Get the saved diff
+ &EvalReadDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+
+ // Compare the diffs
+ &EvalCompareDiff{
+ Info: info,
+ One: &diff,
+ Two: &diffApply,
+ },
+
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ // Call pre-apply hook
+ &EvalApplyPre{
+ Info: info,
+ State: &state,
+ Diff: &diffApply,
+ },
+ &EvalApply{
+ Info: info,
+ State: &state,
+ Diff: &diffApply,
+ Provider: &provider,
+ Output: &state,
+ Error: &err,
+ CreateNew: &createNew,
+ },
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: stateDeps,
+ State: &state,
+ },
+ &EvalApplyProvisioners{
+ Info: info,
+ State: &state,
+ Resource: n.Config,
+ InterpResource: resource,
+ CreateNew: &createNew,
+ Error: &err,
+ When: config.ProvisionerWhenCreate,
+ },
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ return createBeforeDestroyEnabled && err != nil, nil
+ },
+ Then: &EvalUndeposeState{
+ Name: stateId,
+ State: &state,
+ },
+ Else: &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: stateDeps,
+ State: &state,
+ },
+ },
+
+ // We clear the diff out here so that future nodes
+ // don't see a diff that is already complete. There
+ // is no longer a diff!
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: nil,
+ },
+
+ &EvalApplyPost{
+ Info: info,
+ State: &state,
+ Error: &err,
+ },
+ &EvalUpdateStateHook{},
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
new file mode 100644
index 00000000..c2efd2c3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
@@ -0,0 +1,288 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeDestroyResource represents a resource that is to be destroyed.
+type NodeDestroyResource struct {
+ *NodeAbstractResource
+}
+
+func (n *NodeDestroyResource) Name() string {
+ return n.NodeAbstractResource.Name() + " (destroy)"
+}
+
+// GraphNodeDestroyer
+func (n *NodeDestroyResource) DestroyAddr() *ResourceAddress {
+ return n.Addr
+}
+
+// GraphNodeDestroyerCBD
+func (n *NodeDestroyResource) CreateBeforeDestroy() bool {
+ // If we have no config, we just assume no
+ if n.Config == nil {
+ return false
+ }
+
+ return n.Config.Lifecycle.CreateBeforeDestroy
+}
+
+// GraphNodeDestroyerCBD
+func (n *NodeDestroyResource) ModifyCreateBeforeDestroy(v bool) error {
+ // If we have no config, do nothing since it won't affect the
+ // create step anyways.
+ if n.Config == nil {
+ return nil
+ }
+
+ // Set CBD to true
+ n.Config.Lifecycle.CreateBeforeDestroy = true
+
+ return nil
+}
+
+// GraphNodeReferenceable, overriding NodeAbstractResource
+func (n *NodeDestroyResource) ReferenceableName() []string {
+ // We modify our referenceable name to have the suffix of ".destroy"
+ // since depending on the creation side doesn't necessarily mean
+ // depending on destruction.
+ suffix := ".destroy"
+
+ // If we're CBD, we also append "-cbd". This is because CBD will setup
+ // its own edges (in CBDEdgeTransformer). Depending on the "destroy"
+ // side generally doesn't mean depending on CBD as well. See GH-11349
+ if n.CreateBeforeDestroy() {
+ suffix += "-cbd"
+ }
+
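+ // e.g. "aws_instance.web" becomes "aws_instance.web.destroy", or
+ // "aws_instance.web.destroy-cbd" when create_before_destroy is set.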
+ result := n.NodeAbstractResource.ReferenceableName()
+ for i, v := range result {
+ result[i] = v + suffix
+ }
+
+ return result
+}
+
+// GraphNodeReferencer, overriding NodeAbstractResource
+func (n *NodeDestroyResource) References() []string {
+ // If we have a config, then we need to include destroy-time dependencies
+ if c := n.Config; c != nil {
+ var result []string
+ for _, p := range c.Provisioners {
+ // We include conn info and config for destroy time provisioners
+ // as dependencies that we have.
+ if p.When == config.ProvisionerWhenDestroy {
+ result = append(result, ReferencesFromConfig(p.ConnInfo)...)
+ result = append(result, ReferencesFromConfig(p.RawConfig)...)
+ }
+ }
+
+ return result
+ }
+
+ return nil
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+ // If we have no config we do nothing
+ if n.Addr == nil {
+ return nil, nil
+ }
+
+ state, lock := ctx.State()
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Start creating the steps
+ steps := make([]GraphTransformer, 0, 5)
+
+ // We want deposed resources in the state to be destroyed
+ steps = append(steps, &DeposedTransformer{
+ State: state,
+ View: n.Addr.stateId(),
+ })
+
+ // Target
+ steps = append(steps, &TargetsTransformer{
+ ParsedTargets: n.Targets,
+ })
+
+ // Always end with the root being added
+ steps = append(steps, &RootTransformer{})
+
+ // Build the graph
+ b := &BasicGraphBuilder{
+ Steps: steps,
+ Name: "NodeResourceDestroy",
+ }
+ return b.Build(ctx.Path())
+}
+
+// GraphNodeEvalable
+func (n *NodeDestroyResource) EvalTree() EvalNode {
+ // stateId is the ID to put into the state
+ stateId := n.Addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: n.Addr.Type,
+ uniqueExtra: "destroy",
+ }
+
+ // Build the resource for eval
+ addr := n.Addr
+ resource := &Resource{
+ Name: addr.Name,
+ Type: addr.Type,
+ CountIndex: addr.Index,
+ }
+ if resource.CountIndex < 0 {
+ resource.CountIndex = 0
+ }
+
+ // Get our state
+ rs := n.ResourceState
+ if rs == nil {
+ rs = &ResourceState{}
+ }
+
+ var diffApply *InstanceDiff
+ var provider ResourceProvider
+ var state *InstanceState
+ var err error
+ return &EvalOpFilter{
+ Ops: []walkOperation{walkApply, walkDestroy},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ // Get the saved diff for apply
+ &EvalReadDiff{
+ Name: stateId,
+ Diff: &diffApply,
+ },
+
+ // Filter the diff so we only get the destroy
+ &EvalFilterDiff{
+ Diff: &diffApply,
+ Output: &diffApply,
+ Destroy: true,
+ },
+
+ // Stop here if the diff has nothing to destroy
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if diffApply != nil && diffApply.GetDestroy() {
+ return true, nil
+ }
+
+ return true, EvalEarlyExitError{}
+ },
+ Then: EvalNoop{},
+ },
+
+ // Load the instance info so we have the module path set
+ &EvalInstanceInfo{Info: info},
+
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ &EvalRequireState{
+ State: &state,
+ },
+
+ // Call pre-apply hook
+ &EvalApplyPre{
+ Info: info,
+ State: &state,
+ Diff: &diffApply,
+ },
+
+ // Run destroy provisioners if not tainted
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if state != nil && state.Tainted {
+ return false, nil
+ }
+
+ return true, nil
+ },
+
+ Then: &EvalApplyProvisioners{
+ Info: info,
+ State: &state,
+ Resource: n.Config,
+ InterpResource: resource,
+ Error: &err,
+ When: config.ProvisionerWhenDestroy,
+ },
+ },
+
+ // If we have a provisioning error, then we just call
+ // the post-apply hook now.
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ return err != nil, nil
+ },
+
+ Then: &EvalApplyPost{
+ Info: info,
+ State: &state,
+ Error: &err,
+ },
+ },
+
+ // Make sure we handle data sources properly.
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ if n.Addr == nil {
+ return false, fmt.Errorf("nil address")
+ }
+
+ if n.Addr.Mode == config.DataResourceMode {
+ return true, nil
+ }
+
+ return false, nil
+ },
+
+ Then: &EvalReadDataApply{
+ Info: info,
+ Diff: &diffApply,
+ Provider: &provider,
+ Output: &state,
+ },
+ Else: &EvalApply{
+ Info: info,
+ State: &state,
+ Diff: &diffApply,
+ Provider: &provider,
+ Output: &state,
+ Error: &err,
+ },
+ },
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Addr.Type,
+ Provider: rs.Provider,
+ Dependencies: rs.Dependencies,
+ State: &state,
+ },
+ &EvalApplyPost{
+ Info: info,
+ State: &state,
+ Error: &err,
+ },
+ &EvalUpdateStateHook{},
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
new file mode 100644
index 00000000..52bbf88a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
@@ -0,0 +1,83 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// NodePlannableResource represents a resource that is "plannable":
+// it is ready to be planned in order to create a diff.
+type NodePlannableResource struct {
+ *NodeAbstractCountResource
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+ // Grab the state which we read
+ state, lock := ctx.State()
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Expand the resource count which must be available by now from EvalTree
+ count, err := n.Config.Count()
+ if err != nil {
+ return nil, err
+ }
+
+ // The concrete resource factory we'll use
+ concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+ // Add the config and state since we don't do that via transforms
+ a.Config = n.Config
+
+ return &NodePlannableResourceInstance{
+ NodeAbstractResource: a,
+ }
+ }
+
+ // The concrete resource factory we'll use for orphans
+ concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex {
+ // Add the config and state since we don't do that via transforms
+ a.Config = n.Config
+
+ return &NodePlannableResourceOrphan{
+ NodeAbstractResource: a,
+ }
+ }
+
+ // Start creating the steps
+ steps := []GraphTransformer{
+ // Expand the count.
+ &ResourceCountTransformer{
+ Concrete: concreteResource,
+ Count: count,
+ Addr: n.ResourceAddr(),
+ },
+
+ // Add the count orphans
+ &OrphanResourceCountTransformer{
+ Concrete: concreteResourceOrphan,
+ Count: count,
+ Addr: n.ResourceAddr(),
+ State: state,
+ },
+
+ // Attach the state
+ &AttachStateTransformer{State: state},
+
+ // Targeting
+ &TargetsTransformer{ParsedTargets: n.Targets},
+
+ // Connect references so ordering is correct
+ &ReferenceTransformer{},
+
+ // Make sure there is a single root
+ &RootTransformer{},
+ }
+
+ // Build the graph
+ b := &BasicGraphBuilder{
+ Steps: steps,
+ Validate: true,
+ Name: "NodePlannableResource",
+ }
+ return b.Build(ctx.Path())
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
new file mode 100644
index 00000000..9b02362b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
@@ -0,0 +1,53 @@
+package terraform
+
+// NodePlanDestroyableResource represents a resource that can be planned
+// for destruction: planning it produces a destroy diff.
+type NodePlanDestroyableResource struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeDestroyer
+func (n *NodePlanDestroyableResource) DestroyAddr() *ResourceAddress {
+ return n.Addr
+}
+
+// GraphNodeEvalable
+func (n *NodePlanDestroyableResource) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ }
+
+ // Declare a bunch of variables that are used for state during
+ // evaluation. Most of these are written to by address below.
+ var diff *InstanceDiff
+ var state *InstanceState
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ &EvalDiffDestroy{
+ Info: info,
+ State: &state,
+ Output: &diff,
+ },
+ &EvalCheckPreventDestroy{
+ Resource: n.Config,
+ Diff: &diff,
+ },
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
new file mode 100644
index 00000000..b5295690
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
@@ -0,0 +1,190 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodePlannableResourceInstance represents a _single_ resource
+// instance that is plannable. This means it represents a single
+// count index, for example.
+type NodePlannableResourceInstance struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodePlannableResourceInstance) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ ModulePath: normalizeModulePath(addr.Path),
+ }
+
+ // Build the resource for eval
+ resource := &Resource{
+ Name: addr.Name,
+ Type: addr.Type,
+ CountIndex: addr.Index,
+ }
+ if resource.CountIndex < 0 {
+ resource.CountIndex = 0
+ }
+
+ // Determine the dependencies for the state.
+ stateDeps := n.StateReferences()
+
+ // Eval info is different depending on what kind of resource this is
+ switch n.Config.Mode {
+ case config.ManagedResourceMode:
+ return n.evalTreeManagedResource(
+ stateId, info, resource, stateDeps,
+ )
+ case config.DataResourceMode:
+ return n.evalTreeDataResource(
+ stateId, info, resource, stateDeps)
+ default:
+ panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
+ }
+}
+
+func (n *NodePlannableResourceInstance) evalTreeDataResource(
+ stateId string, info *InstanceInfo,
+ resource *Resource, stateDeps []string) EvalNode {
+ var provider ResourceProvider
+ var config *ResourceConfig
+ var diff *InstanceDiff
+ var state *InstanceState
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+
+ // We need to re-interpolate the config here because some
+ // of the attributes may have become computed during
+ // earlier planning, due to other resources having
+ // "requires new resource" diffs.
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &config,
+ },
+
+ &EvalIf{
+ If: func(ctx EvalContext) (bool, error) {
+ computed := config.ComputedKeys != nil && len(config.ComputedKeys) > 0
+
+ // If the configuration is complete and we
+ // already have a state then we don't need to
+ // do any further work during apply, because we
+ // already populated the state during refresh.
+ if !computed && state != nil {
+ return true, EvalEarlyExitError{}
+ }
+
+ return true, nil
+ },
+ Then: EvalNoop{},
+ },
+
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+
+ &EvalReadDataDiff{
+ Info: info,
+ Config: &config,
+ Provider: &provider,
+ Output: &diff,
+ OutputState: &state,
+ },
+
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: stateDeps,
+ State: &state,
+ },
+
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+ },
+ }
+}
+
+func (n *NodePlannableResourceInstance) evalTreeManagedResource(
+ stateId string, info *InstanceInfo,
+ resource *Resource, stateDeps []string) EvalNode {
+ // Declare a bunch of variables that are used for state during
+ // evaluation. Most of these are written to by address below.
+ var provider ResourceProvider
+ var diff *InstanceDiff
+ var state *InstanceState
+ var resourceConfig *ResourceConfig
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &resourceConfig,
+ },
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ // Re-run validation to catch any errors we missed, e.g. type
+ // mismatches on computed values.
+ &EvalValidateResource{
+ Provider: &provider,
+ Config: &resourceConfig,
+ ResourceName: n.Config.Name,
+ ResourceType: n.Config.Type,
+ ResourceMode: n.Config.Mode,
+ IgnoreWarnings: true,
+ },
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ &EvalDiff{
+ Name: stateId,
+ Info: info,
+ Config: &resourceConfig,
+ Resource: n.Config,
+ Provider: &provider,
+ State: &state,
+ OutputDiff: &diff,
+ OutputState: &state,
+ },
+ &EvalCheckPreventDestroy{
+ Resource: n.Config,
+ Diff: &diff,
+ },
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.Config.Type,
+ Provider: n.Config.Provider,
+ Dependencies: stateDeps,
+ State: &state,
+ },
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
new file mode 100644
index 00000000..73d6e41f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
@@ -0,0 +1,54 @@
+package terraform
+
+// NodePlannableResourceOrphan represents a resource that is an "orphan":
+// it exists in the state but is no longer in the configuration, so the
+// plan for it is destruction.
+type NodePlannableResourceOrphan struct {
+ *NodeAbstractResource
+}
+
+func (n *NodePlannableResourceOrphan) Name() string {
+ return n.NodeAbstractResource.Name() + " (orphan)"
+}
+
+// GraphNodeEvalable
+func (n *NodePlannableResourceOrphan) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ ModulePath: normalizeModulePath(addr.Path),
+ }
+
+ // Declare a bunch of variables that are used for state during
+ // evaluation. Most of these are written to by address below.
+ var diff *InstanceDiff
+ var state *InstanceState
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ &EvalDiffDestroy{
+ Info: info,
+ State: &state,
+ Output: &diff,
+ },
+ &EvalCheckPreventDestroy{
+ Resource: n.Config,
+ ResourceId: stateId,
+ Diff: &diff,
+ },
+ &EvalWriteDiff{
+ Name: stateId,
+ Diff: &diff,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
new file mode 100644
index 00000000..3a44926c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
@@ -0,0 +1,100 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeRefreshableResource represents a resource that is "refreshable":
+// its state can be re-read from the provider and updated in place.
+type NodeRefreshableResource struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeDestroyer
+func (n *NodeRefreshableResource) DestroyAddr() *ResourceAddress {
+ return n.Addr
+}
+
+// GraphNodeEvalable
+func (n *NodeRefreshableResource) EvalTree() EvalNode {
+ // Eval info is different depending on what kind of resource this is
+ switch mode := n.Addr.Mode; mode {
+ case config.ManagedResourceMode:
+ return n.evalTreeManagedResource()
+
+ case config.DataResourceMode:
+ // Get the data source node. If we don't have a configuration
+ // then it is an orphan so we destroy it (remove it from the state).
+ var dn GraphNodeEvalable
+ if n.Config != nil {
+ dn = &NodeRefreshableDataResourceInstance{
+ NodeAbstractResource: n.NodeAbstractResource,
+ }
+ } else {
+ dn = &NodeDestroyableDataResource{
+ NodeAbstractResource: n.NodeAbstractResource,
+ }
+ }
+
+ return dn.EvalTree()
+ default:
+ panic(fmt.Errorf("unsupported resource mode %s", mode))
+ }
+}
+
+func (n *NodeRefreshableResource) evalTreeManagedResource() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // stateId is the ID to put into the state
+ stateId := addr.stateId()
+
+ // Build the instance info. More of this will be populated during eval
+ info := &InstanceInfo{
+ Id: stateId,
+ Type: addr.Type,
+ }
+
+ // Declare a bunch of variables that are used for state during
+ // evaluation. Most of these are written to by address below.
+ var provider ResourceProvider
+ var state *InstanceState
+
+ // This happened during initial development. All known cases were
+ // fixed and tested but as a sanity check let's assert here.
+ if n.ResourceState == nil {
+ err := fmt.Errorf(
+ "No resource state attached for addr: %s\n\n"+
+ "This is a bug. Please report this to Terraform with your configuration\n"+
+ "and state attached. Please be careful to scrub any sensitive information.",
+ addr)
+ return &EvalReturnError{Error: &err}
+ }
+
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadState{
+ Name: stateId,
+ Output: &state,
+ },
+ &EvalRefresh{
+ Info: info,
+ Provider: &provider,
+ State: &state,
+ Output: &state,
+ },
+ &EvalWriteState{
+ Name: stateId,
+ ResourceType: n.ResourceState.Type,
+ Provider: n.ResourceState.Provider,
+ Dependencies: n.ResourceState.Dependencies,
+ State: &state,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
new file mode 100644
index 00000000..f528f24b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
@@ -0,0 +1,158 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// NodeValidatableResource represents a resource that is used for validation
+// only.
+type NodeValidatableResource struct {
+ *NodeAbstractCountResource
+}
+
+// GraphNodeEvalable
+func (n *NodeValidatableResource) EvalTree() EvalNode {
+ // Ensure we're validating
+ c := n.NodeAbstractCountResource
+ c.Validate = true
+ return c.EvalTree()
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+ // Grab the state which we read
+ state, lock := ctx.State()
+ lock.RLock()
+ defer lock.RUnlock()
+
+ // Expand the resource count which must be available by now from EvalTree
+ count := 1
+ if n.Config.RawCount.Value() != unknownValue() {
+ var err error
+ count, err = n.Config.Count()
+ if err != nil {
+ return nil, err
+ }
+ }
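+ // (An unknown count is validated as a single instance; the real
+ // expansion happens later, at plan time.)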
+
+ // The concrete resource factory we'll use
+ concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+ // Add the config and state since we don't do that via transforms
+ a.Config = n.Config
+
+ return &NodeValidatableResourceInstance{
+ NodeAbstractResource: a,
+ }
+ }
+
+ // Start creating the steps
+ steps := []GraphTransformer{
+ // Expand the count.
+ &ResourceCountTransformer{
+ Concrete: concreteResource,
+ Count: count,
+ Addr: n.ResourceAddr(),
+ },
+
+ // Attach the state
+ &AttachStateTransformer{State: state},
+
+ // Targeting
+ &TargetsTransformer{ParsedTargets: n.Targets},
+
+ // Connect references so ordering is correct
+ &ReferenceTransformer{},
+
+ // Make sure there is a single root
+ &RootTransformer{},
+ }
+
+ // Build the graph
+ b := &BasicGraphBuilder{
+ Steps: steps,
+ Validate: true,
+ Name: "NodeValidatableResource",
+ }
+
+ return b.Build(ctx.Path())
+}
+
+// NodeValidatableResourceInstance represents a _single_ resource instance
+// to validate.
+type NodeValidatableResourceInstance struct {
+ *NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodeValidatableResourceInstance) EvalTree() EvalNode {
+ addr := n.NodeAbstractResource.Addr
+
+ // Build the resource for eval
+ resource := &Resource{
+ Name: addr.Name,
+ Type: addr.Type,
+ CountIndex: addr.Index,
+ }
+ if resource.CountIndex < 0 {
+ resource.CountIndex = 0
+ }
+
+ // Declare a bunch of variables that are used for state during
+ // evaluation. Most of these are written to by address below.
+ var config *ResourceConfig
+ var provider ResourceProvider
+
+ seq := &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalValidateResourceSelfRef{
+ Addr: &addr,
+ Config: &n.Config.RawConfig,
+ },
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalInterpolate{
+ Config: n.Config.RawConfig.Copy(),
+ Resource: resource,
+ Output: &config,
+ },
+ &EvalValidateResource{
+ Provider: &provider,
+ Config: &config,
+ ResourceName: n.Config.Name,
+ ResourceType: n.Config.Type,
+ ResourceMode: n.Config.Mode,
+ },
+ },
+ }
+
+ // Validate all the provisioners
+ for _, p := range n.Config.Provisioners {
+ var provisioner ResourceProvisioner
+ var connConfig *ResourceConfig
+ seq.Nodes = append(
+ seq.Nodes,
+ &EvalGetProvisioner{
+ Name: p.Type,
+ Output: &provisioner,
+ },
+ &EvalInterpolate{
+ Config: p.RawConfig.Copy(),
+ Resource: resource,
+ Output: &config,
+ },
+ &EvalInterpolate{
+ Config: p.ConnInfo.Copy(),
+ Resource: resource,
+ Output: &connConfig,
+ },
+ &EvalValidateProvisioner{
+ Provisioner: &provisioner,
+ Config: &config,
+ ConnConfig: &connConfig,
+ },
+ )
+ }
+
+ return seq
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
new file mode 100644
index 00000000..cb61a4e3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
@@ -0,0 +1,22 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// NodeRootVariable represents a root variable input.
+type NodeRootVariable struct {
+ Config *config.Variable
+}
+
+func (n *NodeRootVariable) Name() string {
+ result := fmt.Sprintf("var.%s", n.Config.Name)
+ return result
+}
+
+// GraphNodeReferenceable
+func (n *NodeRootVariable) ReferenceableName() []string {
+ return []string{n.Name()}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go
new file mode 100644
index 00000000..ca99685a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/path.go
@@ -0,0 +1,24 @@
+package terraform
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+)
+
+// PathCacheKey returns a cache key for a module path.
+//
+// TODO: test
+func PathCacheKey(path []string) string {
+ // There is probably a better way to do this, but this is working for now.
+ // We just create an MD5 hash of all the MD5 hashes of all the path
+ // elements. This gets us the property that it is unique per ordering.
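+ // e.g. PathCacheKey([]string{"root", "child"}) differs from
+ // PathCacheKey([]string{"child", "root"}).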
+ hash := md5.New()
+ for _, p := range path {
+ single := md5.Sum([]byte(p))
+ if _, err := hash.Write(single[:]); err != nil {
+ panic(err)
+ }
+ }
+
+ return hex.EncodeToString(hash.Sum(nil))
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go
new file mode 100644
index 00000000..ea088450
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/plan.go
@@ -0,0 +1,153 @@
+package terraform
+
+import (
+ "bytes"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/hashicorp/terraform/config/module"
+)
+
+func init() {
+ gob.Register(make([]interface{}, 0))
+ gob.Register(make([]map[string]interface{}, 0))
+ gob.Register(make(map[string]interface{}))
+ gob.Register(make(map[string]string))
+}
+
+// Plan represents a single Terraform execution plan, which contains
+// all the information necessary to make an infrastructure change.
+//
+// A plan has to contain basically the entire state of the world
+// necessary to make a change: the state, diff, config, backend config, etc.
+// This is so that it can run alone without any other data.
+type Plan struct {
+ Diff *Diff
+ Module *module.Tree
+ State *State
+ Vars map[string]interface{}
+ Targets []string
+
+ // Backend is the backend that this plan should use and store data with.
+ Backend *BackendState
+
+ once sync.Once
+}
+
+// Context returns a Context with the data encapsulated in this plan.
+//
+// The following fields in opts are overridden by the plan: Module,
+// Diff, State, Targets, Variables.
+func (p *Plan) Context(opts *ContextOpts) (*Context, error) {
+ opts.Diff = p.Diff
+ opts.Module = p.Module
+ opts.State = p.State
+ opts.Targets = p.Targets
+
+ opts.Variables = make(map[string]interface{})
+ for k, v := range p.Vars {
+ opts.Variables[k] = v
+ }
+
+ return NewContext(opts)
+}
+
+func (p *Plan) String() string {
+ buf := new(bytes.Buffer)
+ buf.WriteString("DIFF:\n\n")
+ buf.WriteString(p.Diff.String())
+ buf.WriteString("\n\nSTATE:\n\n")
+ buf.WriteString(p.State.String())
+ return buf.String()
+}
+
+func (p *Plan) init() {
+ p.once.Do(func() {
+ if p.Diff == nil {
+ p.Diff = new(Diff)
+ p.Diff.init()
+ }
+
+ if p.State == nil {
+ p.State = new(State)
+ p.State.init()
+ }
+
+ if p.Vars == nil {
+ p.Vars = make(map[string]interface{})
+ }
+ })
+}
+
+// The format byte is prefixed into the plan file format so that we have
+// the ability in the future to change the file format if we want for any
+// reason.
+const planFormatMagic = "tfplan"
+const planFormatVersion byte = 1
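+
+// A plan file is therefore: the magic bytes "tfplan", a single version
+// byte, then a gob-encoded Plan structure.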
+
+// ReadPlan reads a plan structure out of a reader in the format that
+// was written by WritePlan.
+func ReadPlan(src io.Reader) (*Plan, error) {
+ var result *Plan
+ var err error
+ n := 0
+
+ // Verify the magic bytes
+ magic := make([]byte, len(planFormatMagic))
+ // Read until we have all the magic bytes, accumulating across
+ // short reads.
+ for n < len(magic) {
+ m, err := src.Read(magic[n:])
+ if err != nil {
+ return nil, fmt.Errorf("error while reading magic bytes: %s", err)
+ }
+ n += m
+ }
+ if string(magic) != planFormatMagic {
+ return nil, fmt.Errorf("not a valid plan file")
+ }
+
+ // Verify the version is something we can read
+ var formatByte [1]byte
+ n, err = src.Read(formatByte[:])
+ if err != nil {
+ return nil, err
+ }
+ if n != len(formatByte) {
+ return nil, errors.New("failed to read plan version byte")
+ }
+
+ if formatByte[0] != planFormatVersion {
+ return nil, fmt.Errorf("unknown plan file version: %d", formatByte[0])
+ }
+
+ dec := gob.NewDecoder(src)
+ if err := dec.Decode(&result); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// WritePlan writes a plan somewhere in a binary format.
+func WritePlan(d *Plan, dst io.Writer) error {
+ // Write the magic bytes so we can determine the file format later
+ n, err := dst.Write([]byte(planFormatMagic))
+ if err != nil {
+ return err
+ }
+ if n != len(planFormatMagic) {
+ return errors.New("failed to write plan format magic bytes")
+ }
+
+ // Write a version byte so we can iterate on version at some point
+ n, err = dst.Write([]byte{planFormatVersion})
+ if err != nil {
+ return err
+ }
+ if n != 1 {
+ return errors.New("failed to write plan version byte")
+ }
+
+ return gob.NewEncoder(dst).Encode(d)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go
new file mode 100644
index 00000000..0acf0beb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go
@@ -0,0 +1,360 @@
+package terraform
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/mitchellh/copystructure"
+ "github.com/mitchellh/reflectwalk"
+)
+
+// ResourceProvisionerConfig is used to pair a provisioner
+// with its provided configuration. This allows us to use singleton
+// instances of each ResourceProvisioner and to keep the relevant
+// configuration instead of instantiating a new Provisioner for each
+// resource.
+type ResourceProvisionerConfig struct {
+ Type string
+ Provisioner ResourceProvisioner
+ Config *ResourceConfig
+ RawConfig *config.RawConfig
+ ConnInfo *config.RawConfig
+}
+
+// Resource encapsulates a resource, its configuration, its provider,
+// its current state, and potentially a desired diff from the state it
+// wants to reach.
+type Resource struct {
+ // These are all used by the new EvalNode stuff.
+ Name string
+ Type string
+ CountIndex int
+
+ // These aren't really used anymore anywhere, but we keep them around
+ // since we haven't done a proper cleanup yet.
+ Id string
+ Info *InstanceInfo
+ Config *ResourceConfig
+ Dependencies []string
+ Diff *InstanceDiff
+ Provider ResourceProvider
+ State *InstanceState
+ Provisioners []*ResourceProvisionerConfig
+ Flags ResourceFlag
+}
+
+// ResourceFlag specifies what kind of instance we're working with, whether
+// it's a primary instance, a tainted instance, or an orphan.
+type ResourceFlag byte
+
+// InstanceInfo is used to hold information about the instance and/or
+// resource being modified.
+type InstanceInfo struct {
+ // Id is a unique name to represent this instance. This is not related
+ // to InstanceState.ID in any way.
+ Id string
+
+ // ModulePath is the complete path of the module containing this
+ // instance.
+ ModulePath []string
+
+ // Type is the resource type of this instance
+ Type string
+
+ // uniqueExtra is an internal field that can be populated to supply
+ // extra metadata that is used to identify a unique instance in
+ // the graph walk. This will be appended to HumanID when uniqueId
+ // is called.
+ uniqueExtra string
+}
+
+// HumanId is a unique Id that is human-friendly and useful for UI elements.
+func (i *InstanceInfo) HumanId() string {
+ if i == nil {
+ return "<nil>"
+ }
+
+ if len(i.ModulePath) <= 1 {
+ return i.Id
+ }
+
+ return fmt.Sprintf(
+ "module.%s.%s",
+ strings.Join(i.ModulePath[1:], "."),
+ i.Id)
+}
+
+func (i *InstanceInfo) uniqueId() string {
+ prefix := i.HumanId()
+ if v := i.uniqueExtra; v != "" {
+ prefix += " " + v
+ }
+
+ return prefix
+}
+
+// ResourceConfig holds the configuration given for a resource. This is
+// done instead of a raw `map[string]interface{}` type so that rich
+// methods can be added to it to make dealing with it easier.
+type ResourceConfig struct {
+ ComputedKeys []string
+ Raw map[string]interface{}
+ Config map[string]interface{}
+
+ raw *config.RawConfig
+}
+
+// NewResourceConfig creates a new ResourceConfig from a config.RawConfig.
+func NewResourceConfig(c *config.RawConfig) *ResourceConfig {
+ result := &ResourceConfig{raw: c}
+ result.interpolateForce()
+ return result
+}
+
+// DeepCopy performs a deep copy of the configuration. This makes it safe
+// to modify any of the structures that are part of the resource config without
+// affecting the original configuration.
+func (c *ResourceConfig) DeepCopy() *ResourceConfig {
+ // DeepCopying a nil should return a nil to avoid panics
+ if c == nil {
+ return nil
+ }
+
+ // Copy, this will copy all the exported attributes
+ copy, err := copystructure.Config{Lock: true}.Copy(c)
+ if err != nil {
+ panic(err)
+ }
+
+ // Force the type
+ result := copy.(*ResourceConfig)
+
+ // For the raw configuration, we can just use its own copy method
+ result.raw = c.raw.Copy()
+
+ return result
+}
+
+// Equal checks the equality of two resource configs.
+func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool {
+ // If either are nil, then they're only equal if they're both nil
+ if c == nil || c2 == nil {
+ return c == c2
+ }
+
+ // Sort the computed keys so they're deterministic
+ sort.Strings(c.ComputedKeys)
+ sort.Strings(c2.ComputedKeys)
+
+ // Two resource configs are equal if their exported properties are equal.
+ // We don't compare "raw" because it is never used again after
+ // initialization and for all intents and purposes they are equal
+ // if the exported properties are equal.
+ check := [][2]interface{}{
+ {c.ComputedKeys, c2.ComputedKeys},
+ {c.Raw, c2.Raw},
+ {c.Config, c2.Config},
+ }
+ for _, pair := range check {
+ if !reflect.DeepEqual(pair[0], pair[1]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// CheckSet checks that the given list of configuration keys is
+// properly set. If not, errors are returned for each unset key.
+//
+// This is useful to be called in the Validate method of a ResourceProvider.
+func (c *ResourceConfig) CheckSet(keys []string) []error {
+ var errs []error
+
+ for _, k := range keys {
+ if !c.IsSet(k) {
+ errs = append(errs, fmt.Errorf("%s must be set", k))
+ }
+ }
+
+ return errs
+}
+
+// Get looks up a configuration value by key and returns the value.
+//
+// The second return value is true if the get was successful. Get will
+// return the raw value if the key is computed, so you should pair this
+// with IsComputed.
+func (c *ResourceConfig) Get(k string) (interface{}, bool) {
+ // We aim to get a value from the configuration. If it is computed,
+ // then we return the pure raw value.
+ source := c.Config
+ if c.IsComputed(k) {
+ source = c.Raw
+ }
+
+ return c.get(k, source)
+}
+
+// GetRaw looks up a configuration value by key and returns the value,
+// from the raw, uninterpolated config.
+//
+// The second return value is true if the get was successful. Get will
+// not succeed if the value is being computed.
+func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) {
+ return c.get(k, c.Raw)
+}
+
+// IsComputed returns whether the given key is computed or not.
+func (c *ResourceConfig) IsComputed(k string) bool {
+ // Check the config to see whether we get a computed
+ // value out of it.
+ v, ok := c.get(k, c.Config)
+ if !ok {
+ return false
+ }
+
+ // If value is nil, then it isn't computed
+ if v == nil {
+ return false
+ }
+
+ // Test if the value contains an unknown value
+ var w unknownCheckWalker
+ if err := reflectwalk.Walk(v, &w); err != nil {
+ panic(err)
+ }
+
+ return w.Unknown
+}
+
+// IsSet checks if the key in the configuration is set. A key is set if
+// it has a value or the value is being computed (is unknown currently).
+//
+// This function should be used rather than checking the keys of the
+// raw configuration itself, since a key may be omitted from the raw
+// configuration if it is being computed.
+func (c *ResourceConfig) IsSet(k string) bool {
+ if c == nil {
+ return false
+ }
+
+ if c.IsComputed(k) {
+ return true
+ }
+
+ if _, ok := c.Get(k); ok {
+ return true
+ }
+
+ return false
+}
+
+func (c *ResourceConfig) get(
+ k string, raw map[string]interface{}) (interface{}, bool) {
+ parts := strings.Split(k, ".")
+ if len(parts) == 1 && parts[0] == "" {
+ parts = nil
+ }
+
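+ // Walk the key one dot-separated part at a time: e.g. "tags.Name"
+ // looks up raw["tags"]["Name"]; numeric parts index into lists and
+ // the special "#" part returns a list's length.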
+ var current interface{} = raw
+ var previous interface{} = nil
+ for i, part := range parts {
+ if current == nil {
+ return nil, false
+ }
+
+ cv := reflect.ValueOf(current)
+ switch cv.Kind() {
+ case reflect.Map:
+ previous = current
+ v := cv.MapIndex(reflect.ValueOf(part))
+ if !v.IsValid() {
+ if i > 0 && i != (len(parts)-1) {
+ tryKey := strings.Join(parts[i:], ".")
+ v := cv.MapIndex(reflect.ValueOf(tryKey))
+ if !v.IsValid() {
+ return nil, false
+ }
+
+ return v.Interface(), true
+ }
+
+ return nil, false
+ }
+
+ current = v.Interface()
+ case reflect.Slice:
+ previous = current
+
+ if part == "#" {
+ // If any value in a list is computed, this whole thing
+ // is computed and we can't read any part of it.
+ for i := 0; i < cv.Len(); i++ {
+ if v := cv.Index(i).Interface(); v == unknownValue() {
+ return v, true
+ }
+ }
+
+ current = cv.Len()
+ } else {
+ i, err := strconv.ParseInt(part, 0, 0)
+ if err != nil {
+ return nil, false
+ }
+ if i >= int64(cv.Len()) {
+ return nil, false
+ }
+ current = cv.Index(int(i)).Interface()
+ }
+ case reflect.String:
+ // This happens when map keys contain "." and have a common
+ // prefix so were split as path components above.
+ actualKey := strings.Join(parts[i-1:], ".")
+ if prevMap, ok := previous.(map[string]interface{}); ok {
+ v, ok := prevMap[actualKey]
+ return v, ok
+ }
+
+ return nil, false
+ default:
+ panic(fmt.Sprintf("Unknown kind: %s", cv.Kind()))
+ }
+ }
+
+ return current, true
+}
+
+// interpolateForce is a temporary thing. We want to get rid of interpolate
+// above and likewise this, but it can only be done after the f-ast-graph
+// refactor is complete.
+func (c *ResourceConfig) interpolateForce() {
+ if c.raw == nil {
+ var err error
+ c.raw, err = config.NewRawConfig(make(map[string]interface{}))
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ c.ComputedKeys = c.raw.UnknownKeys()
+ c.Raw = c.raw.RawMap()
+ c.Config = c.raw.Config()
+}
+
+// unknownCheckWalker
+type unknownCheckWalker struct {
+ Unknown bool
+}
+
+func (w *unknownCheckWalker) Primitive(v reflect.Value) error {
+ if v.Interface() == unknownValue() {
+ w.Unknown = true
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
new file mode 100644
index 00000000..a8a0c955
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
@@ -0,0 +1,301 @@
+package terraform
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// ResourceAddress is a way of identifying an individual resource (or,
+// eventually, a subset of resources) within the state. It is used for Targets.
+type ResourceAddress struct {
+ // Addresses a resource falling somewhere in the module path
+ // When specified alone, addresses all resources within a module path
+ Path []string
+
+ // Addresses a specific resource that occurs in a list
+ Index int
+
+ InstanceType InstanceType
+ InstanceTypeSet bool
+ Name string
+ Type string
+ Mode config.ResourceMode // significant only if InstanceTypeSet
+}
+
+// Copy returns a copy of this ResourceAddress
+func (r *ResourceAddress) Copy() *ResourceAddress {
+ if r == nil {
+ return nil
+ }
+
+ n := &ResourceAddress{
+ Path: make([]string, 0, len(r.Path)),
+ Index: r.Index,
+ InstanceType: r.InstanceType,
+ Name: r.Name,
+ Type: r.Type,
+ Mode: r.Mode,
+ }
+ n.Path = append(n.Path, r.Path...)
+ return n
+}
+
+// String outputs the address that parses into this address.
+func (r *ResourceAddress) String() string {
+ var result []string
+ for _, p := range r.Path {
+ result = append(result, "module", p)
+ }
+
+ switch r.Mode {
+ case config.ManagedResourceMode:
+ // nothing to do
+ case config.DataResourceMode:
+ result = append(result, "data")
+ default:
+ panic(fmt.Errorf("unsupported resource mode %s", r.Mode))
+ }
+
+ if r.Type != "" {
+ result = append(result, r.Type)
+ }
+
+ if r.Name != "" {
+ name := r.Name
+ if r.InstanceTypeSet {
+ switch r.InstanceType {
+ case TypePrimary:
+ name += ".primary"
+ case TypeDeposed:
+ name += ".deposed"
+ case TypeTainted:
+ name += ".tainted"
+ }
+ }
+
+ if r.Index >= 0 {
+ name += fmt.Sprintf("[%d]", r.Index)
+ }
+ result = append(result, name)
+ }
+
+ return strings.Join(result, ".")
+}
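+
+// As an illustrative sketch (example values only, not from any real
+// state): an address with Path ["foo"], Type "aws_instance", Name "web",
+// InstanceTypeSet true, InstanceType TypeTainted, and Index 1 renders as:
+//
+//    module.foo.aws_instance.web.tainted[1]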
+
+// stateId returns the ID that this resource should be entered with
+// in the state. This is also used for diffs. In the future, we'd like to
+// move away from this string field, so it is not exported.
+func (r *ResourceAddress) stateId() string {
+ result := fmt.Sprintf("%s.%s", r.Type, r.Name)
+ switch r.Mode {
+ case config.ManagedResourceMode:
+ // Done
+ case config.DataResourceMode:
+ result = fmt.Sprintf("data.%s", result)
+ default:
+ panic(fmt.Errorf("unknown resource mode: %s", r.Mode))
+ }
+ if r.Index >= 0 {
+ result += fmt.Sprintf(".%d", r.Index)
+ }
+
+ return result
+}
+
+// parseResourceAddressConfig creates a resource address from a config.Resource
+func parseResourceAddressConfig(r *config.Resource) (*ResourceAddress, error) {
+ return &ResourceAddress{
+ Type: r.Type,
+ Name: r.Name,
+ Index: -1,
+ InstanceType: TypePrimary,
+ Mode: r.Mode,
+ }, nil
+}
+
+// parseResourceAddressInternal parses the somewhat bespoke resource
+// identifier used in states and diffs, such as "instance.name.0".
+func parseResourceAddressInternal(s string) (*ResourceAddress, error) {
+ // Split based on ".". Every resource address should have at least two
+ // elements (type and name).
+ parts := strings.Split(s, ".")
+ if len(parts) < 2 || len(parts) > 4 {
+ return nil, fmt.Errorf("Invalid internal resource address format: %s", s)
+ }
+
+ // Data resource if we have at least 3 parts and the first one is data
+ mode := config.ManagedResourceMode
+ if len(parts) > 2 && parts[0] == "data" {
+ mode = config.DataResourceMode
+ parts = parts[1:]
+ }
+
+ // If we're not a data resource and we have more than 3, then it is an error
+ if len(parts) > 3 && mode != config.DataResourceMode {
+ return nil, fmt.Errorf("Invalid internal resource address format: %s", s)
+ }
+
+ // Build the parts of the resource address that are guaranteed to exist
+ addr := &ResourceAddress{
+ Type: parts[0],
+ Name: parts[1],
+ Index: -1,
+ InstanceType: TypePrimary,
+ Mode: mode,
+ }
+
+ // If we have more parts, then we have an index. Parse that.
+ if len(parts) > 2 {
+ idx, err := strconv.ParseInt(parts[2], 0, 0)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err)
+ }
+
+ addr.Index = int(idx)
+ }
+
+ return addr, nil
+}
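+
+// Illustrative examples of the internal format accepted above
+// (hypothetical identifiers, not from any real state):
+//
+//    "aws_instance.web"    -> Type "aws_instance", Name "web", Index -1
+//    "aws_instance.web.0"  -> as above, but Index 0
+//    "data.aws_ami.base"   -> DataResourceMode, Type "aws_ami", Name "base"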
+
+func ParseResourceAddress(s string) (*ResourceAddress, error) {
+ matches, err := tokenizeResourceAddress(s)
+ if err != nil {
+ return nil, err
+ }
+ mode := config.ManagedResourceMode
+ if matches["data_prefix"] != "" {
+ mode = config.DataResourceMode
+ }
+ resourceIndex, err := ParseResourceIndex(matches["index"])
+ if err != nil {
+ return nil, err
+ }
+ instanceType, err := ParseInstanceType(matches["instance_type"])
+ if err != nil {
+ return nil, err
+ }
+ path := ParseResourcePath(matches["path"])
+
+ // not allowed to say "data." without a type following
+ if mode == config.DataResourceMode && matches["type"] == "" {
+ return nil, fmt.Errorf("must target specific data instance")
+ }
+
+ return &ResourceAddress{
+ Path: path,
+ Index: resourceIndex,
+ InstanceType: instanceType,
+ InstanceTypeSet: matches["instance_type"] != "",
+ Name: matches["name"],
+ Type: matches["type"],
+ Mode: mode,
+ }, nil
+}
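+
+// A minimal usage sketch for ParseResourceAddress (hypothetical address,
+// for illustration only):
+//
+//    addr, err := ParseResourceAddress("module.foo.aws_instance.web[1]")
+//    if err == nil {
+//        // addr.Path == []string{"foo"}, addr.Type == "aws_instance",
+//        // addr.Name == "web", addr.Index == 1
+//    }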
+
+func (addr *ResourceAddress) Equals(raw interface{}) bool {
+ other, ok := raw.(*ResourceAddress)
+ if !ok {
+ return false
+ }
+
+ pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 ||
+ reflect.DeepEqual(addr.Path, other.Path)
+
+ indexMatch := addr.Index == -1 ||
+ other.Index == -1 ||
+ addr.Index == other.Index
+
+ nameMatch := addr.Name == "" ||
+ other.Name == "" ||
+ addr.Name == other.Name
+
+ typeMatch := addr.Type == "" ||
+ other.Type == "" ||
+ addr.Type == other.Type
+
+ // mode is significant only when type is set
+ modeMatch := addr.Type == "" ||
+ other.Type == "" ||
+ addr.Mode == other.Mode
+
+ return pathMatch &&
+ indexMatch &&
+ addr.InstanceType == other.InstanceType &&
+ nameMatch &&
+ typeMatch &&
+ modeMatch
+}
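+
+// Note that Equals has wildcard-style semantics: an unset field (empty
+// Name or Type, Index -1) on either side matches anything. A sketch with
+// hypothetical addresses:
+//
+//    a, _ := ParseResourceAddress("aws_instance.web")    // Index -1
+//    b, _ := ParseResourceAddress("aws_instance.web[3]") // Index 3
+//    match := a.Equals(b)                                // true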
+
+func ParseResourceIndex(s string) (int, error) {
+ if s == "" {
+ return -1, nil
+ }
+ return strconv.Atoi(s)
+}
+
+func ParseResourcePath(s string) []string {
+ if s == "" {
+ return nil
+ }
+ parts := strings.Split(s, ".")
+ path := make([]string, 0, len(parts))
+ for _, s := range parts {
+ // Due to the limitations of the regexp match below, the path match has
+ // some noise in it we have to filter out :|
+ if s == "" || s == "module" {
+ continue
+ }
+ path = append(path, s)
+ }
+ return path
+}
+
+func ParseInstanceType(s string) (InstanceType, error) {
+ switch s {
+ case "", "primary":
+ return TypePrimary, nil
+ case "deposed":
+ return TypeDeposed, nil
+ case "tainted":
+ return TypeTainted, nil
+ default:
+ return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s)
+ }
+}
+
+func tokenizeResourceAddress(s string) (map[string]string, error) {
+ // Example of portions of the regexp below using the
+ // string "aws_instance.web.tainted[1]"
+ re := regexp.MustCompile(`\A` +
+ // "module.foo.module.bar" (optional)
+ `(?P<path>(?:module\.[^.]+\.?)*)` +
+ // possibly "data.", if targeting is a data resource
+ `(?P<data_prefix>(?:data\.)?)` +
+ // "aws_instance.web" (optional when module path specified)
+ `(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
+ // "tainted" (optional, omission implies: "primary")
+ `(?:\.(?P<instance_type>\w+))?` +
+ // "1" (optional, omission implies: "0")
+ `(?:\[(?P<index>\d+)\])?` +
+ `\z`)
+
+ groupNames := re.SubexpNames()
+ rawMatches := re.FindAllStringSubmatch(s, -1)
+ if len(rawMatches) != 1 {
+ return nil, fmt.Errorf("Problem parsing address: %q", s)
+ }
+
+ matches := make(map[string]string)
+ for i, m := range rawMatches[0] {
+ matches[groupNames[i]] = m
+ }
+
+ return matches, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
new file mode 100644
index 00000000..1a68c869
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
@@ -0,0 +1,204 @@
+package terraform
+
+// ResourceProvider is an interface that must be implemented by any
+// resource provider: the thing that creates and manages the resources in
+// a Terraform configuration.
+//
+// Important implementation note: All returned pointers, such as
+// *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to
+// shared data. Terraform is highly parallel and assumes that this data is safe
+// to read/write in parallel so it must be unique references. Note that it is
+// safe to return arguments as results, however.
+type ResourceProvider interface {
+ /*********************************************************************
+ * Functions related to the provider
+ *********************************************************************/
+
+ // Input is called to ask the provider to ask the user for input
+ // for completing the configuration if necessary.
+ //
+ // This may or may not be called, so resource provider writers shouldn't
+ // rely on this being available to set some default values for validate
+ // later. An example of a situation where this wouldn't be called is
+ // when the user is not using a TTY.
+ Input(UIInput, *ResourceConfig) (*ResourceConfig, error)
+
+ // Validate is called once at the beginning with the raw configuration
+ // (no interpolation done) and can return a list of warnings and/or
+ // errors.
+ //
+ // This is called once with the provider configuration only. It may not
+ // be called at all if no provider configuration is given.
+ //
+ // This should not assume that any values of the configurations are valid.
+ // The primary use case of this call is to check that required keys are
+ // set.
+ Validate(*ResourceConfig) ([]string, []error)
+
+ // Configure configures the provider itself with the configuration
+ // given. This is useful for setting things like access keys.
+ //
+ // This won't be called at all if no provider configuration is given.
+ //
+ // Configure returns an error if it occurred.
+ Configure(*ResourceConfig) error
+
+ // Resources returns all the available resource types that this provider
+ // knows how to manage.
+ Resources() []ResourceType
+
+ // Stop is called when the provider should halt any in-flight actions.
+ //
+ // This can be used to make a nicer Ctrl-C experience for Terraform.
+ // Even if this isn't implemented to do anything (just returns nil),
+ // Terraform will still cleanly stop after the currently executing
+ // graph node is complete. However, this API can be used to make more
+ // efficient halts.
+ //
+ // Stop doesn't have to and shouldn't block waiting for in-flight actions
+ // to complete. It should take any action it wants and return immediately
+ // acknowledging it has received the stop request. Terraform core will
+ // stop making further API calls to the provider shortly after Stop is
+ // called (technically, once the currently executing graph nodes are
+ // complete).
+ //
+ // The error returned, if non-nil, is assumed to mean that signaling the
+ // stop somehow failed and that the user should expect potentially waiting
+ // a longer period of time.
+ Stop() error
+
+ /*********************************************************************
+ * Functions related to individual resources
+ *********************************************************************/
+
+ // ValidateResource is called once at the beginning with the raw
+ // configuration (no interpolation done) and can return a list of warnings
+ // and/or errors.
+ //
+ // This is called once per resource.
+ //
+ // This should not assume any of the values in the resource configuration
+ // are valid since it is possible they have to be interpolated still.
+ // The primary use case of this call is to check that the required keys
+ // are set and that the general structure is correct.
+ ValidateResource(string, *ResourceConfig) ([]string, []error)
+
+ // Apply applies a diff to a specific resource and returns the new
+ // resource state along with an error.
+ //
+ // If the resource state given has an empty ID, then a new resource
+ // is expected to be created.
+ Apply(
+ *InstanceInfo,
+ *InstanceState,
+ *InstanceDiff) (*InstanceState, error)
+
+ // Diff diffs a resource versus a desired state and returns
+ // a diff.
+ Diff(
+ *InstanceInfo,
+ *InstanceState,
+ *ResourceConfig) (*InstanceDiff, error)
+
+ // Refresh refreshes a resource and updates all of its attributes
+ // with the latest information.
+ Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error)
+
+ /*********************************************************************
+ * Functions related to importing
+ *********************************************************************/
+
+ // ImportState requests that the given resource be imported.
+ //
+ // The returned InstanceState only requires ID be set. Importing
+ // will always call Refresh afterwards to complete the state.
+ //
+ // IMPORTANT: InstanceState doesn't have the resource type attached
+ // to it. A type must be specified on the state via the Ephemeral
+ // field on the state.
+ //
+ // This function can return multiple states. Normally, an import
+ // will map 1:1 to a physical resource. However, some resources map
+ // to multiple. For example, an AWS security group may contain many rules.
+ // Each rule is represented by a separate resource in Terraform,
+ // therefore multiple states are returned.
+ ImportState(*InstanceInfo, string) ([]*InstanceState, error)
+
+ /*********************************************************************
+ * Functions related to data resources
+ *********************************************************************/
+
+ // ValidateDataSource is called once at the beginning with the raw
+ // configuration (no interpolation done) and can return a list of warnings
+ // and/or errors.
+ //
+ // This is called once per data source instance.
+ //
+ // This should not assume any of the values in the resource configuration
+ // are valid since it is possible they have to be interpolated still.
+ // The primary use case of this call is to check that the required keys
+ // are set and that the general structure is correct.
+ ValidateDataSource(string, *ResourceConfig) ([]string, []error)
+
+ // DataSources returns all of the available data sources that this
+ // provider implements.
+ DataSources() []DataSource
+
+ // ReadDataDiff produces a diff that represents the state that will
+ // be produced when the given data source is read using a later call
+ // to ReadDataApply.
+ ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)
+
+ // ReadDataApply initializes a data instance using the configuration
+ // in a diff produced by ReadDataDiff.
+ ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
+}
+
+// ResourceProviderCloser is an interface that providers that can close
+// connections that aren't needed anymore must implement.
+type ResourceProviderCloser interface {
+ Close() error
+}
+
+// ResourceType is a type of resource that a resource provider can manage.
+type ResourceType struct {
+ Name string // Name of the resource, example "instance" (no provider prefix)
+ Importable bool // Whether this resource supports importing
+}
+
+// DataSource is a data source that a resource provider implements.
+type DataSource struct {
+ Name string
+}
+
+// ResourceProviderFactory is a function type that creates a new instance
+// of a resource provider.
+type ResourceProviderFactory func() (ResourceProvider, error)
+
+// ResourceProviderFactoryFixed is a helper that creates a
+// ResourceProviderFactory that just returns some fixed provider.
+func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory {
+ return func() (ResourceProvider, error) {
+ return p, nil
+ }
+}
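+
+// A minimal usage sketch (hypothetical provider value, for illustration
+// only):
+//
+//    var p ResourceProvider = new(MockResourceProvider)
+//    factory := ResourceProviderFactoryFixed(p)
+//    got, err := factory() // got == p, err == nil on every call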
+
+func ProviderHasResource(p ResourceProvider, n string) bool {
+ for _, rt := range p.Resources() {
+ if rt.Name == n {
+ return true
+ }
+ }
+
+ return false
+}
+
+func ProviderHasDataSource(p ResourceProvider, n string) bool {
+ for _, rt := range p.DataSources() {
+ if rt.Name == n {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
new file mode 100644
index 00000000..f5315339
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
@@ -0,0 +1,297 @@
+package terraform
+
+import "sync"
+
+// MockResourceProvider implements ResourceProvider but mocks out all the
+// calls for testing purposes.
+type MockResourceProvider struct {
+ sync.Mutex
+
+ // Anything you want, in case you need to store extra data with the mock.
+ Meta interface{}
+
+ CloseCalled bool
+ CloseError error
+ InputCalled bool
+ InputInput UIInput
+ InputConfig *ResourceConfig
+ InputReturnConfig *ResourceConfig
+ InputReturnError error
+ InputFn func(UIInput, *ResourceConfig) (*ResourceConfig, error)
+ ApplyCalled bool
+ ApplyInfo *InstanceInfo
+ ApplyState *InstanceState
+ ApplyDiff *InstanceDiff
+ ApplyFn func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error)
+ ApplyReturn *InstanceState
+ ApplyReturnError error
+ ConfigureCalled bool
+ ConfigureConfig *ResourceConfig
+ ConfigureFn func(*ResourceConfig) error
+ ConfigureReturnError error
+ DiffCalled bool
+ DiffInfo *InstanceInfo
+ DiffState *InstanceState
+ DiffDesired *ResourceConfig
+ DiffFn func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error)
+ DiffReturn *InstanceDiff
+ DiffReturnError error
+ RefreshCalled bool
+ RefreshInfo *InstanceInfo
+ RefreshState *InstanceState
+ RefreshFn func(*InstanceInfo, *InstanceState) (*InstanceState, error)
+ RefreshReturn *InstanceState
+ RefreshReturnError error
+ ResourcesCalled bool
+ ResourcesReturn []ResourceType
+ ReadDataApplyCalled bool
+ ReadDataApplyInfo *InstanceInfo
+ ReadDataApplyDiff *InstanceDiff
+ ReadDataApplyFn func(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
+ ReadDataApplyReturn *InstanceState
+ ReadDataApplyReturnError error
+ ReadDataDiffCalled bool
+ ReadDataDiffInfo *InstanceInfo
+ ReadDataDiffDesired *ResourceConfig
+ ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)
+ ReadDataDiffReturn *InstanceDiff
+ ReadDataDiffReturnError error
+ StopCalled bool
+ StopFn func() error
+ StopReturnError error
+ DataSourcesCalled bool
+ DataSourcesReturn []DataSource
+ ValidateCalled bool
+ ValidateConfig *ResourceConfig
+ ValidateFn func(*ResourceConfig) ([]string, []error)
+ ValidateReturnWarns []string
+ ValidateReturnErrors []error
+ ValidateResourceFn func(string, *ResourceConfig) ([]string, []error)
+ ValidateResourceCalled bool
+ ValidateResourceType string
+ ValidateResourceConfig *ResourceConfig
+ ValidateResourceReturnWarns []string
+ ValidateResourceReturnErrors []error
+ ValidateDataSourceFn func(string, *ResourceConfig) ([]string, []error)
+ ValidateDataSourceCalled bool
+ ValidateDataSourceType string
+ ValidateDataSourceConfig *ResourceConfig
+ ValidateDataSourceReturnWarns []string
+ ValidateDataSourceReturnErrors []error
+
+ ImportStateCalled bool
+ ImportStateInfo *InstanceInfo
+ ImportStateID string
+ ImportStateReturn []*InstanceState
+ ImportStateReturnError error
+ ImportStateFn func(*InstanceInfo, string) ([]*InstanceState, error)
+}
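+
+// A minimal test sketch (hypothetical expectations, for illustration
+// only):
+//
+//    p := new(MockResourceProvider)
+//    p.ResourcesReturn = []ResourceType{{Name: "null_resource"}}
+//    ok := ProviderHasResource(p, "null_resource") // true
+//    // p.ResourcesCalled is now true as well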
+
+func (p *MockResourceProvider) Close() error {
+ p.CloseCalled = true
+ return p.CloseError
+}
+
+func (p *MockResourceProvider) Input(
+ input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
+ p.InputCalled = true
+ p.InputInput = input
+ p.InputConfig = c
+ if p.InputFn != nil {
+ return p.InputFn(input, c)
+ }
+ return p.InputReturnConfig, p.InputReturnError
+}
+
+func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ValidateCalled = true
+ p.ValidateConfig = c
+ if p.ValidateFn != nil {
+ return p.ValidateFn(c)
+ }
+ return p.ValidateReturnWarns, p.ValidateReturnErrors
+}
+
+func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ValidateResourceCalled = true
+ p.ValidateResourceType = t
+ p.ValidateResourceConfig = c
+
+ if p.ValidateResourceFn != nil {
+ return p.ValidateResourceFn(t, c)
+ }
+
+ return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors
+}
+
+func (p *MockResourceProvider) Configure(c *ResourceConfig) error {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ConfigureCalled = true
+ p.ConfigureConfig = c
+
+ if p.ConfigureFn != nil {
+ return p.ConfigureFn(c)
+ }
+
+ return p.ConfigureReturnError
+}
+
+func (p *MockResourceProvider) Stop() error {
+ p.Lock()
+ defer p.Unlock()
+
+ p.StopCalled = true
+ if p.StopFn != nil {
+ return p.StopFn()
+ }
+
+ return p.StopReturnError
+}
+
+func (p *MockResourceProvider) Apply(
+ info *InstanceInfo,
+ state *InstanceState,
+ diff *InstanceDiff) (*InstanceState, error) {
+ // We only lock while writing data. Reading is fine
+ p.Lock()
+ p.ApplyCalled = true
+ p.ApplyInfo = info
+ p.ApplyState = state
+ p.ApplyDiff = diff
+ p.Unlock()
+
+ if p.ApplyFn != nil {
+ return p.ApplyFn(info, state, diff)
+ }
+
+ return p.ApplyReturn.DeepCopy(), p.ApplyReturnError
+}
+
+func (p *MockResourceProvider) Diff(
+ info *InstanceInfo,
+ state *InstanceState,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.DiffCalled = true
+ p.DiffInfo = info
+ p.DiffState = state
+ p.DiffDesired = desired
+ if p.DiffFn != nil {
+ return p.DiffFn(info, state, desired)
+ }
+
+ return p.DiffReturn.DeepCopy(), p.DiffReturnError
+}
+
+func (p *MockResourceProvider) Refresh(
+ info *InstanceInfo,
+ s *InstanceState) (*InstanceState, error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.RefreshCalled = true
+ p.RefreshInfo = info
+ p.RefreshState = s
+
+ if p.RefreshFn != nil {
+ return p.RefreshFn(info, s)
+ }
+
+ return p.RefreshReturn.DeepCopy(), p.RefreshReturnError
+}
+
+func (p *MockResourceProvider) Resources() []ResourceType {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ResourcesCalled = true
+ return p.ResourcesReturn
+}
+
+func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ImportStateCalled = true
+ p.ImportStateInfo = info
+ p.ImportStateID = id
+ if p.ImportStateFn != nil {
+ return p.ImportStateFn(info, id)
+ }
+
+ var result []*InstanceState
+ if p.ImportStateReturn != nil {
+ result = make([]*InstanceState, len(p.ImportStateReturn))
+ for i, v := range p.ImportStateReturn {
+ result[i] = v.DeepCopy()
+ }
+ }
+
+ return result, p.ImportStateReturnError
+}
+
+func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ValidateDataSourceCalled = true
+ p.ValidateDataSourceType = t
+ p.ValidateDataSourceConfig = c
+
+ if p.ValidateDataSourceFn != nil {
+ return p.ValidateDataSourceFn(t, c)
+ }
+
+ return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors
+}
+
+func (p *MockResourceProvider) ReadDataDiff(
+ info *InstanceInfo,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ReadDataDiffCalled = true
+ p.ReadDataDiffInfo = info
+ p.ReadDataDiffDesired = desired
+ if p.ReadDataDiffFn != nil {
+ return p.ReadDataDiffFn(info, desired)
+ }
+
+ return p.ReadDataDiffReturn.DeepCopy(), p.ReadDataDiffReturnError
+}
+
+func (p *MockResourceProvider) ReadDataApply(
+ info *InstanceInfo,
+ d *InstanceDiff) (*InstanceState, error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ReadDataApplyCalled = true
+ p.ReadDataApplyInfo = info
+ p.ReadDataApplyDiff = d
+
+ if p.ReadDataApplyFn != nil {
+ return p.ReadDataApplyFn(info, d)
+ }
+
+ return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError
+}
+
+func (p *MockResourceProvider) DataSources() []DataSource {
+ p.Lock()
+ defer p.Unlock()
+
+ p.DataSourcesCalled = true
+ return p.DataSourcesReturn
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
new file mode 100644
index 00000000..361ec1ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
@@ -0,0 +1,54 @@
+package terraform
+
+// ResourceProvisioner is an interface that must be implemented by any
+// resource provisioner: the thing that initializes resources in
+// a Terraform configuration.
+type ResourceProvisioner interface {
+ // Validate is called once at the beginning with the raw
+ // configuration (no interpolation done) and can return a list of warnings
+ // and/or errors.
+ //
+ // This is called once per resource.
+ //
+ // This should not assume any of the values in the resource configuration
+ // are valid since it is possible they have to be interpolated still.
+ // The primary use case of this call is to check that the required keys
+ // are set and that the general structure is correct.
+ Validate(*ResourceConfig) ([]string, []error)
+
+ // Apply runs the provisioner on a specific resource and returns the new
+ // resource state along with an error. Instead of a diff, the ResourceConfig
+ // is provided since provisioners only run after a resource has been
+ // newly created.
+ Apply(UIOutput, *InstanceState, *ResourceConfig) error
+
+ // Stop is called when the provisioner should halt any in-flight actions.
+ //
+ // This can be used to make a nicer Ctrl-C experience for Terraform.
+ // Even if this isn't implemented to do anything (just returns nil),
+ // Terraform will still cleanly stop after the currently executing
+ // graph node is complete. However, this API can be used to make more
+ // efficient halts.
+ //
+ // Stop doesn't have to and shouldn't block waiting for in-flight actions
+ // to complete. It should take any action it wants and return immediately
+ // acknowledging it has received the stop request. Terraform core will
+ // stop making further API calls to the provisioner shortly after Stop
+ // is called (technically, once the currently executing graph nodes are
+ // complete).
+ //
+ // The error returned, if non-nil, is assumed to mean that signaling the
+ // stop somehow failed and that the user should expect potentially waiting
+ // a longer period of time.
+ Stop() error
+}
+
+// ResourceProvisionerCloser is an interface that provisioners that can close
+// connections that aren't needed anymore must implement.
+type ResourceProvisionerCloser interface {
+ Close() error
+}
+
+// ResourceProvisionerFactory is a function type that creates a new instance
+// of a resource provisioner.
+type ResourceProvisionerFactory func() (ResourceProvisioner, error)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
new file mode 100644
index 00000000..f471a518
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
@@ -0,0 +1,72 @@
+package terraform
+
+import "sync"
+
+// MockResourceProvisioner implements ResourceProvisioner but mocks out all the
+// calls for testing purposes.
+type MockResourceProvisioner struct {
+ sync.Mutex
+ // Anything you want, in case you need to store extra data with the mock.
+ Meta interface{}
+
+ ApplyCalled bool
+ ApplyOutput UIOutput
+ ApplyState *InstanceState
+ ApplyConfig *ResourceConfig
+ ApplyFn func(*InstanceState, *ResourceConfig) error
+ ApplyReturnError error
+
+ ValidateCalled bool
+ ValidateConfig *ResourceConfig
+ ValidateFn func(c *ResourceConfig) ([]string, []error)
+ ValidateReturnWarns []string
+ ValidateReturnErrors []error
+
+ StopCalled bool
+ StopFn func() error
+ StopReturnError error
+}
+
+func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.ValidateCalled = true
+ p.ValidateConfig = c
+ if p.ValidateFn != nil {
+ return p.ValidateFn(c)
+ }
+ return p.ValidateReturnWarns, p.ValidateReturnErrors
+}
+
+func (p *MockResourceProvisioner) Apply(
+ output UIOutput,
+ state *InstanceState,
+ c *ResourceConfig) error {
+ p.Lock()
+
+ p.ApplyCalled = true
+ p.ApplyOutput = output
+ p.ApplyState = state
+ p.ApplyConfig = c
+ if p.ApplyFn != nil {
+ fn := p.ApplyFn
+ p.Unlock()
+ return fn(state, c)
+ }
+
+ defer p.Unlock()
+ return p.ApplyReturnError
+}
+
+func (p *MockResourceProvisioner) Stop() error {
+ p.Lock()
+ defer p.Unlock()
+
+ p.StopCalled = true
+ if p.StopFn != nil {
+ return p.StopFn()
+ }
+
+ return p.StopReturnError
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/semantics.go b/vendor/github.com/hashicorp/terraform/terraform/semantics.go
new file mode 100644
index 00000000..20f1d8a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/semantics.go
@@ -0,0 +1,132 @@
+package terraform
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphSemanticChecker is the interface that semantic checks across
+// the entire Terraform graph implement.
+//
+// The graph should NOT be modified by the semantic checker.
+type GraphSemanticChecker interface {
+ Check(*dag.Graph) error
+}
+
+// UnorderedSemanticCheckRunner is an implementation of GraphSemanticChecker
+// that runs a list of SemanticCheckers against the vertices of the graph
+// in no specified order.
+type UnorderedSemanticCheckRunner struct {
+ Checks []SemanticChecker
+}
+
+func (sc *UnorderedSemanticCheckRunner) Check(g *dag.Graph) error {
+ var err error
+ for _, v := range g.Vertices() {
+ for _, check := range sc.Checks {
+ if e := check.Check(g, v); e != nil {
+ err = multierror.Append(err, e)
+ }
+ }
+ }
+
+ return err
+}
+
+// SemanticChecker is the interface that semantic checks across the
+// Terraform graph implement. Errors are accumulated. Even after an error
+// is returned, child vertices in the graph will still be visited.
+//
+// The graph should NOT be modified by the semantic checker.
+//
+// The order in which vertices are visited is left unspecified, so the
+// semantic checks should not rely on that.
+type SemanticChecker interface {
+ Check(*dag.Graph, dag.Vertex) error
+}
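+
+// A minimal checker sketch (hypothetical check, for illustration only):
+//
+//    type noNilVertices struct{}
+//
+//    func (noNilVertices) Check(g *dag.Graph, v dag.Vertex) error {
+//        if v == nil {
+//            return fmt.Errorf("nil vertex in graph")
+//        }
+//        return nil
+//    }
+//
+// which can then be run over a graph with:
+//
+//    runner := &UnorderedSemanticCheckRunner{
+//        Checks: []SemanticChecker{noNilVertices{}},
+//    }
+//    err := runner.Check(g)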
+
+// smcUserVariables does all the semantic checks to verify that the
+// variables given satisfy the configuration itself.
+func smcUserVariables(c *config.Config, vs map[string]interface{}) []error {
+ var errs []error
+
+ cvs := make(map[string]*config.Variable)
+ for _, v := range c.Variables {
+ cvs[v.Name] = v
+ }
+
+ // Check that all required variables are present
+ required := make(map[string]struct{})
+ for _, v := range c.Variables {
+ if v.Required() {
+ required[v.Name] = struct{}{}
+ }
+ }
+ for k := range vs {
+ delete(required, k)
+ }
+ if len(required) > 0 {
+ for k := range required {
+ errs = append(errs, fmt.Errorf(
+ "Required variable not set: %s", k))
+ }
+ }
+
+ // Check that types match up
+ for name, proposedValue := range vs {
+ // Check for "map.key" fields. These stopped working with Terraform
+ // 0.7 but we do this to surface a better error message informing
+ // the user what happened.
+ if idx := strings.Index(name, "."); idx > 0 {
+ key := name[:idx]
+ if _, ok := cvs[key]; ok {
+ errs = append(errs, fmt.Errorf(
+ "%s: Overriding map keys with the format `name.key` is no "+
+ "longer allowed. You may still override keys by setting "+
+ "`name = { key = value }`. The maps will be merged. This "+
+ "behavior appeared in 0.7.0.", name))
+ continue
+ }
+ }
+
+ schema, ok := cvs[name]
+ if !ok {
+ continue
+ }
+
+ declaredType := schema.Type()
+
+ switch declaredType {
+ case config.VariableTypeString:
+ switch proposedValue.(type) {
+ case string:
+ continue
+ }
+ case config.VariableTypeMap:
+ switch v := proposedValue.(type) {
+ case map[string]interface{}:
+ continue
+ case []map[string]interface{}:
+ // if we have a list of 1 map, it will get coerced later as needed
+ if len(v) == 1 {
+ continue
+ }
+ }
+ case config.VariableTypeList:
+ switch proposedValue.(type) {
+ case []interface{}:
+ continue
+ }
+ }
+ errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s",
+ name, declaredType.Printable(), hclTypeName(proposedValue)))
+ }
+
+ // TODO(mitchellh): variables that are unknown
+
+ return errs
+}
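+
+// A hedged sketch of the type rules above (hypothetical variable and
+// value, for illustration only): given a declared variable "tags" of
+// type map,
+//
+//    errs := smcUserVariables(c, map[string]interface{}{"tags": "oops"})
+//
+// yields one error of the form
+// "variable tags should be type map, got string".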
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow.go b/vendor/github.com/hashicorp/terraform/terraform/shadow.go
new file mode 100644
index 00000000..46325595
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow.go
@@ -0,0 +1,28 @@
+package terraform
+
+// Shadow is the interface that any "shadow" structures must implement.
+//
+// A shadow structure is an interface implementation (typically) that
+// shadows a real implementation and verifies that the same behavior occurs
+// on both. The semantics of this behavior are up to the interface itself.
+//
+// A shadow NEVER modifies real values or state. It must always be safe to use.
+//
+// For example, a ResourceProvider shadow ensures that the same operations
+// are done on the same resources with the same configurations.
+//
+// The typical usage of a shadow following this interface is to complete
+// the real operations, then call CloseShadow which tells the shadow that
+// the real side is done. Then, once the shadow is also complete, call
+// ShadowError to find any errors that may have been caught.
+type Shadow interface {
+ // CloseShadow tells the shadow that the REAL implementation is
+ // complete. Therefore, any calls that would block should now return
+ // immediately since no more changes will happen to the real side.
+ CloseShadow() error
+
+ // ShadowError returns the errors that the shadow has found.
+ // This should be called AFTER CloseShadow and AFTER the shadow is
+ // known to be complete (no more calls to it).
+ ShadowError() error
+}
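+
+// The expected call sequence, as a sketch (s is any Shadow value):
+//
+//    // ... drive the real implementation to completion first ...
+//    if err := s.CloseShadow(); err != nil {
+//        // closing the shadow itself failed
+//    }
+//    // ... wait for the shadow side to finish ...
+//    if err := s.ShadowError(); err != nil {
+//        // behavioral differences were detected
+//    }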
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go
new file mode 100644
index 00000000..116cf84f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go
@@ -0,0 +1,273 @@
+package terraform
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/helper/shadow"
+)
+
+// newShadowComponentFactory creates a shadowed contextComponentFactory
+// so that requests to create new components result in both a real and
+// shadow side.
+func newShadowComponentFactory(
+ f contextComponentFactory) (contextComponentFactory, *shadowComponentFactory) {
+ // Create the shared data
+ shared := &shadowComponentFactoryShared{contextComponentFactory: f}
+
+ // Create the real side
+ real := &shadowComponentFactory{
+ shadowComponentFactoryShared: shared,
+ }
+
+ // Create the shadow
+ shadow := &shadowComponentFactory{
+ shadowComponentFactoryShared: shared,
+ Shadow: true,
+ }
+
+ return real, shadow
+}
+
+// shadowComponentFactory is the shadow side. Any components created
+// with this factory are fake and will not cause real work to happen.
+//
+// Unlike other shadowers, the shadow component factory will allow the
+// shadow to create _any_ component even if it is never requested on the
+// real side. This is because errors will happen later downstream as function
+// calls are made to the shadows that are never matched on the real side.
+type shadowComponentFactory struct {
+ *shadowComponentFactoryShared
+
+ Shadow bool // True if this should return the shadow
+ lock sync.Mutex
+}
+
+func (f *shadowComponentFactory) ResourceProvider(
+ n, uid string) (ResourceProvider, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ real, shadow, err := f.shadowComponentFactoryShared.ResourceProvider(n, uid)
+ var result ResourceProvider = real
+ if f.Shadow {
+ result = shadow
+ }
+
+ return result, err
+}
+
+func (f *shadowComponentFactory) ResourceProvisioner(
+ n, uid string) (ResourceProvisioner, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ real, shadow, err := f.shadowComponentFactoryShared.ResourceProvisioner(n, uid)
+ var result ResourceProvisioner = real
+ if f.Shadow {
+ result = shadow
+ }
+
+ return result, err
+}
+
+// CloseShadow is called when the _real_ side is complete. This will cause
+// all future blocking operations to return immediately on the shadow to
+// ensure the shadow also completes.
+func (f *shadowComponentFactory) CloseShadow() error {
+ // If we aren't the shadow, just return
+ if !f.Shadow {
+ return nil
+ }
+
+ // Lock ourselves so we don't modify state
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ // Grab our shared state
+ shared := f.shadowComponentFactoryShared
+
+ // If we're already closed, it's an error
+ if shared.closed {
+ return fmt.Errorf("component factory shadow already closed")
+ }
+
+ // Close all the providers and provisioners and return the error
+ var result error
+ for _, n := range shared.providerKeys {
+ _, shadow, err := shared.ResourceProvider(n, n)
+ if err == nil && shadow != nil {
+ if err := shadow.CloseShadow(); err != nil {
+ result = multierror.Append(result, err)
+ }
+ }
+ }
+
+ for _, n := range shared.provisionerKeys {
+ _, shadow, err := shared.ResourceProvisioner(n, n)
+ if err == nil && shadow != nil {
+ if err := shadow.CloseShadow(); err != nil {
+ result = multierror.Append(result, err)
+ }
+ }
+ }
+
+ // Mark ourselves as closed
+ shared.closed = true
+
+ return result
+}
+
+func (f *shadowComponentFactory) ShadowError() error {
+ // If we aren't the shadow, just return
+ if !f.Shadow {
+ return nil
+ }
+
+ // Lock ourselves so we don't modify state
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ // Grab our shared state
+ shared := f.shadowComponentFactoryShared
+
+ // If we're not closed, it's an error
+ if !shared.closed {
+ return fmt.Errorf("component factory must be closed to retrieve errors")
+ }
+
+ // Close all the providers and provisioners and return the error
+ var result error
+ for _, n := range shared.providerKeys {
+ _, shadow, err := shared.ResourceProvider(n, n)
+ if err == nil && shadow != nil {
+ if err := shadow.ShadowError(); err != nil {
+ result = multierror.Append(result, err)
+ }
+ }
+ }
+
+ for _, n := range shared.provisionerKeys {
+ _, shadow, err := shared.ResourceProvisioner(n, n)
+ if err == nil && shadow != nil {
+ if err := shadow.ShadowError(); err != nil {
+ result = multierror.Append(result, err)
+ }
+ }
+ }
+
+ return result
+}
+
+// shadowComponentFactoryShared is shared data between the two factories.
+//
+// It is NOT SAFE to run any function on this struct in parallel. Lock
+// access to this struct.
+type shadowComponentFactoryShared struct {
+ contextComponentFactory
+
+ closed bool
+ providers shadow.KeyedValue
+ providerKeys []string
+ provisioners shadow.KeyedValue
+ provisionerKeys []string
+}
+
+// shadowResourceProviderFactoryEntry is the entry that is stored in
+// the Shadows key/value for a provider.
+type shadowComponentFactoryProviderEntry struct {
+ Real ResourceProvider
+ Shadow shadowResourceProvider
+ Err error
+}
+
+type shadowComponentFactoryProvisionerEntry struct {
+ Real ResourceProvisioner
+ Shadow shadowResourceProvisioner
+ Err error
+}
+
+func (f *shadowComponentFactoryShared) ResourceProvider(
+ n, uid string) (ResourceProvider, shadowResourceProvider, error) {
+ // Determine if we already have a value
+ raw, ok := f.providers.ValueOk(uid)
+ if !ok {
+ // Build the entry
+ var entry shadowComponentFactoryProviderEntry
+
+ // No value, initialize. Create the original
+ p, err := f.contextComponentFactory.ResourceProvider(n, uid)
+ if err != nil {
+ entry.Err = err
+ p = nil // Just to be sure
+ }
+
+ if p != nil {
+ // Create the shadow
+ real, shadow := newShadowResourceProvider(p)
+ entry.Real = real
+ entry.Shadow = shadow
+
+ if f.closed {
+ shadow.CloseShadow()
+ }
+ }
+
+ // Store the value
+ f.providers.SetValue(uid, &entry)
+ f.providerKeys = append(f.providerKeys, uid)
+ raw = &entry
+ }
+
+ // Read the entry
+ entry, ok := raw.(*shadowComponentFactoryProviderEntry)
+ if !ok {
+ return nil, nil, fmt.Errorf("Unknown value for shadow provider: %#v", raw)
+ }
+
+ // Return
+ return entry.Real, entry.Shadow, entry.Err
+}
+
+func (f *shadowComponentFactoryShared) ResourceProvisioner(
+ n, uid string) (ResourceProvisioner, shadowResourceProvisioner, error) {
+ // Determine if we already have a value
+ raw, ok := f.provisioners.ValueOk(uid)
+ if !ok {
+ // Build the entry
+ var entry shadowComponentFactoryProvisionerEntry
+
+ // No value, initialize. Create the original
+ p, err := f.contextComponentFactory.ResourceProvisioner(n, uid)
+ if err != nil {
+ entry.Err = err
+ p = nil // Just to be sure
+ }
+
+ if p != nil {
+ // Wrap the real provisioner with its shadow
+ real, shadow := newShadowResourceProvisioner(p)
+ entry.Real = real
+ entry.Shadow = shadow
+
+ if f.closed {
+ shadow.CloseShadow()
+ }
+ }
+
+ // Store the value
+ f.provisioners.SetValue(uid, &entry)
+ f.provisionerKeys = append(f.provisionerKeys, uid)
+ raw = &entry
+ }
+
+ // Read the entry
+ entry, ok := raw.(*shadowComponentFactoryProvisionerEntry)
+ if !ok {
+ return nil, nil, fmt.Errorf("Unknown value for shadow provisioner: %#v", raw)
+ }
+
+ // Return
+ return entry.Real, entry.Shadow, entry.Err
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go
new file mode 100644
index 00000000..5588af25
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go
@@ -0,0 +1,158 @@
+package terraform
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/mitchellh/copystructure"
+)
+
+// newShadowContext creates a new context that will shadow the given context
+// when walking the graph. The resulting context should be used _only once_
+// for a graph walk.
+//
+// The returned Shadow should be closed after the graph walk with the
+// real context is complete. Errors from the shadow can be retrieved there.
+//
+// Most importantly, any operations done on the shadow context (the returned
+// context) will NEVER affect the real context. All structures are deep
+// copied, no real providers or resources are used, etc.
+func newShadowContext(c *Context) (*Context, *Context, Shadow) {
+ // Copy the targets
+ targetRaw, err := copystructure.Copy(c.targets)
+ if err != nil {
+ panic(err)
+ }
+
+ // Copy the variables
+ varRaw, err := copystructure.Copy(c.variables)
+ if err != nil {
+ panic(err)
+ }
+
+ // Copy the provider inputs
+ providerInputRaw, err := copystructure.Copy(c.providerInputConfig)
+ if err != nil {
+ panic(err)
+ }
+
+ // The factories
+ componentsReal, componentsShadow := newShadowComponentFactory(c.components)
+
+ // Create the shadow
+ shadow := &Context{
+ components: componentsShadow,
+ destroy: c.destroy,
+ diff: c.diff.DeepCopy(),
+ hooks: nil,
+ meta: c.meta,
+ module: c.module,
+ state: c.state.DeepCopy(),
+ targets: targetRaw.([]string),
+ variables: varRaw.(map[string]interface{}),
+
+ // NOTE(mitchellh): This is not going to work for shadows that are
+ // testing that input results in the proper end state. At the time
+ // of writing, input is not used in any state-changing graph
+ // walks anyway, so this checks nothing. We set it to this to avoid
+ // panics, but even a nil value would work here.
+ uiInput: new(MockUIInput),
+
+ // Hardcoded to 4 since parallelism in the shadow doesn't matter
+ // a ton since we're doing far less compared to the real side
+ // and our operations are MUCH faster.
+ parallelSem: NewSemaphore(4),
+ providerInputConfig: providerInputRaw.(map[string]map[string]interface{}),
+ }
+
+ // Create the real context. This is effectively just a copy of
+ // the context given except we need to modify some of the values
+ // to point to the real side of a shadow so the shadow can compare values.
+ real := &Context{
+ // The fields below are changed.
+ components: componentsReal,
+
+ // The fields below are direct copies
+ destroy: c.destroy,
+ diff: c.diff,
+ // diffLock - no copy
+ hooks: c.hooks,
+ meta: c.meta,
+ module: c.module,
+ sh: c.sh,
+ state: c.state,
+ // stateLock - no copy
+ targets: c.targets,
+ uiInput: c.uiInput,
+ variables: c.variables,
+
+ // l - no copy
+ parallelSem: c.parallelSem,
+ providerInputConfig: c.providerInputConfig,
+ runContext: c.runContext,
+ runContextCancel: c.runContextCancel,
+ shadowErr: c.shadowErr,
+ }
+
+ return real, shadow, &shadowContextCloser{
+ Components: componentsShadow,
+ }
+}
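+
+// A minimal walk sketch (hypothetical context c, for illustration only):
+//
+//    real, shadowCtx, closer := newShadowContext(c)
+//    // ... walk the graph with real, and the same graph with shadowCtx ...
+//    _ = closer.CloseShadow()    // tell the shadow the real side is done
+//    err := closer.ShadowError() // collect any recorded mismatches
+//    verifyErr := shadowContextVerify(real, shadowCtx) // compare state/diff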
+
+// shadowContextVerify takes the real and shadow context and verifies they
+// have equal diffs and states.
+func shadowContextVerify(real, shadow *Context) error {
+ var result error
+
+ // The states compared must be pruned so they're minimal/clean
+ real.state.prune()
+ shadow.state.prune()
+
+ // Compare the states
+ if !real.state.Equal(shadow.state) {
+ result = multierror.Append(result, fmt.Errorf(
+ "Real and shadow states do not match! "+
+ "Real state:\n\n%s\n\n"+
+ "Shadow state:\n\n%s\n\n",
+ real.state, shadow.state))
+ }
+
+ // Compare the diffs
+ if !real.diff.Equal(shadow.diff) {
+ result = multierror.Append(result, fmt.Errorf(
+ "Real and shadow diffs do not match! "+
+ "Real diff:\n\n%s\n\n"+
+ "Shadow diff:\n\n%s\n\n",
+ real.diff, shadow.diff))
+ }
+
+ return result
+}
+
+// shadowContextCloser is the io.Closer returned by newShadowContext that
+// closes all the shadows and returns the results.
+type shadowContextCloser struct {
+ Components *shadowComponentFactory
+}
+
+// Close closes the shadow context.
+func (c *shadowContextCloser) CloseShadow() error {
+ return c.Components.CloseShadow()
+}
+
+func (c *shadowContextCloser) ShadowError() error {
+ err := c.Components.ShadowError()
+ if err == nil {
+ return nil
+ }
+
+ // This is a sad edge case: if the configuration contains uuid() at
+ // any point, we cannot reason about the shadow execution. Tested
+ // with Context2Plan_shadowUuid.
+ if strings.Contains(err.Error(), "uuid()") {
+ err = nil
+ }
+
+ return err
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
new file mode 100644
index 00000000..9741d7e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
@@ -0,0 +1,815 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/helper/shadow"
+)
+
+// shadowResourceProvider implements ResourceProvider for the shadow
+// eval context defined in eval_context_shadow.go.
+//
+// This is used to verify behavior with a real provider. This shouldn't
+// be used directly.
+type shadowResourceProvider interface {
+ ResourceProvider
+ Shadow
+}
+
+// newShadowResourceProvider creates a new shadowed ResourceProvider.
+//
+// This will assume a well behaved real ResourceProvider. For example,
+// it assumes that the `Resources` call underneath doesn't change values
+// since once it is called on the real provider, it will be cached and
+// returned in the shadow since number of calls to that shouldn't affect
+// actual behavior.
+//
+// However, with calls like Apply, call order is taken into account,
+// parameters are checked for equality, etc.
+func newShadowResourceProvider(p ResourceProvider) (ResourceProvider, shadowResourceProvider) {
+ // Create the shared data
+ shared := shadowResourceProviderShared{}
+
+ // Create the real provider that does actual work
+ real := &shadowResourceProviderReal{
+ ResourceProvider: p,
+ Shared: &shared,
+ }
+
+ // Create the shadow that watches the real value
+ shadow := &shadowResourceProviderShadow{
+ Shared: &shared,
+
+ resources: p.Resources(),
+ dataSources: p.DataSources(),
+ }
+
+ return real, shadow
+}
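+
+// A minimal pairing sketch (hypothetical provider p, for illustration
+// only):
+//
+//    real, shadow := newShadowResourceProvider(p)
+//    // Core calls real for actual work; a duplicate walk calls shadow,
+//    // which replays the recorded results and notes any divergence.
+//    _ = shadow.CloseShadow()
+//    err := shadow.ShadowError()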
+
+// shadowResourceProviderReal is the real resource provider. Function calls
+// to this will perform real work. This records the parameters and return
+// values and call order for the shadow to reproduce.
+type shadowResourceProviderReal struct {
+ ResourceProvider
+
+ Shared *shadowResourceProviderShared
+}
+
+func (p *shadowResourceProviderReal) Close() error {
+ var result error
+ if c, ok := p.ResourceProvider.(ResourceProviderCloser); ok {
+ result = c.Close()
+ }
+
+ p.Shared.CloseErr.SetValue(result)
+ return result
+}
+
+func (p *shadowResourceProviderReal) Input(
+ input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
+ cCopy := c.DeepCopy()
+
+ result, err := p.ResourceProvider.Input(input, c)
+ p.Shared.Input.SetValue(&shadowResourceProviderInput{
+ Config: cCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+func (p *shadowResourceProviderReal) Validate(c *ResourceConfig) ([]string, []error) {
+ warns, errs := p.ResourceProvider.Validate(c)
+ p.Shared.Validate.SetValue(&shadowResourceProviderValidate{
+ Config: c.DeepCopy(),
+ ResultWarn: warns,
+ ResultErr: errs,
+ })
+
+ return warns, errs
+}
+
+func (p *shadowResourceProviderReal) Configure(c *ResourceConfig) error {
+ cCopy := c.DeepCopy()
+
+ err := p.ResourceProvider.Configure(c)
+ p.Shared.Configure.SetValue(&shadowResourceProviderConfigure{
+ Config: cCopy,
+ Result: err,
+ })
+
+ return err
+}
+
+func (p *shadowResourceProviderReal) Stop() error {
+ return p.ResourceProvider.Stop()
+}
+
+func (p *shadowResourceProviderReal) ValidateResource(
+ t string, c *ResourceConfig) ([]string, []error) {
+ key := t
+ configCopy := c.DeepCopy()
+
+ // Real operation
+ warns, errs := p.ResourceProvider.ValidateResource(t, c)
+
+ // Initialize to ensure we always have a wrapper with a lock
+ p.Shared.ValidateResource.Init(
+ key, &shadowResourceProviderValidateResourceWrapper{})
+
+ // Get the result
+ raw := p.Shared.ValidateResource.Value(key)
+ wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
+ if !ok {
+ // If this fails then we just continue with our day... the shadow
+ // will fail too, but there isn't much we can do.
+ log.Printf(
+ "[ERROR] unknown value in ValidateResource shadow value: %#v", raw)
+ return warns, errs
+ }
+
+ // Lock the wrapper for writing and record our call
+ wrapper.Lock()
+ defer wrapper.Unlock()
+
+ wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateResource{
+ Config: configCopy,
+ Warns: warns,
+ Errors: errs,
+ })
+
+ // With it locked, call SetValue again so that it triggers WaitForChange
+ p.Shared.ValidateResource.SetValue(key, wrapper)
+
+ // Return the result
+ return warns, errs
+}
+
+func (p *shadowResourceProviderReal) Apply(
+ info *InstanceInfo,
+ state *InstanceState,
+ diff *InstanceDiff) (*InstanceState, error) {
+ // These have to be copied before the call since the call can modify them
+ stateCopy := state.DeepCopy()
+ diffCopy := diff.DeepCopy()
+
+ result, err := p.ResourceProvider.Apply(info, state, diff)
+ p.Shared.Apply.SetValue(info.uniqueId(), &shadowResourceProviderApply{
+ State: stateCopy,
+ Diff: diffCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+func (p *shadowResourceProviderReal) Diff(
+ info *InstanceInfo,
+ state *InstanceState,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ // These have to be copied before the call since the call can modify them
+ stateCopy := state.DeepCopy()
+ desiredCopy := desired.DeepCopy()
+
+ result, err := p.ResourceProvider.Diff(info, state, desired)
+ p.Shared.Diff.SetValue(info.uniqueId(), &shadowResourceProviderDiff{
+ State: stateCopy,
+ Desired: desiredCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+func (p *shadowResourceProviderReal) Refresh(
+ info *InstanceInfo,
+ state *InstanceState) (*InstanceState, error) {
+ // These have to be copied before the call since the call can modify them
+ stateCopy := state.DeepCopy()
+
+ result, err := p.ResourceProvider.Refresh(info, state)
+ p.Shared.Refresh.SetValue(info.uniqueId(), &shadowResourceProviderRefresh{
+ State: stateCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+func (p *shadowResourceProviderReal) ValidateDataSource(
+ t string, c *ResourceConfig) ([]string, []error) {
+ key := t
+ configCopy := c.DeepCopy()
+
+ // Real operation
+ warns, errs := p.ResourceProvider.ValidateDataSource(t, c)
+
+ // Initialize
+ p.Shared.ValidateDataSource.Init(
+ key, &shadowResourceProviderValidateDataSourceWrapper{})
+
+ // Get the result
+ raw := p.Shared.ValidateDataSource.Value(key)
+ wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
+ if !ok {
+ // If this fails then we just continue with our day... the shadow
+ // will fail too, but there isn't much we can do.
+ log.Printf(
+ "[ERROR] unknown value in ValidateDataSource shadow value: %#v", raw)
+ return warns, errs
+ }
+
+ // Lock the wrapper for writing and record our call
+ wrapper.Lock()
+ defer wrapper.Unlock()
+
+ wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateDataSource{
+ Config: configCopy,
+ Warns: warns,
+ Errors: errs,
+ })
+
+ // Set it
+ p.Shared.ValidateDataSource.SetValue(key, wrapper)
+
+ // Return the result
+ return warns, errs
+}
+
+func (p *shadowResourceProviderReal) ReadDataDiff(
+ info *InstanceInfo,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ // These have to be copied before the call since the call can modify them
+ desiredCopy := desired.DeepCopy()
+
+ result, err := p.ResourceProvider.ReadDataDiff(info, desired)
+ p.Shared.ReadDataDiff.SetValue(info.uniqueId(), &shadowResourceProviderReadDataDiff{
+ Desired: desiredCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+func (p *shadowResourceProviderReal) ReadDataApply(
+ info *InstanceInfo,
+ diff *InstanceDiff) (*InstanceState, error) {
+ // These have to be copied before the call since the call can modify them
+ diffCopy := diff.DeepCopy()
+
+ result, err := p.ResourceProvider.ReadDataApply(info, diff)
+ p.Shared.ReadDataApply.SetValue(info.uniqueId(), &shadowResourceProviderReadDataApply{
+ Diff: diffCopy,
+ Result: result.DeepCopy(),
+ ResultErr: err,
+ })
+
+ return result, err
+}
+
+// shadowResourceProviderShadow is the shadow resource provider. Function
+// calls never affect real resources. This is paired with the "real" side
+// which must be called properly to enable recording.
+type shadowResourceProviderShadow struct {
+ Shared *shadowResourceProviderShared
+
+ // Cached values that are expected to not change
+ resources []ResourceType
+ dataSources []DataSource
+
+ Error error // Error is the list of errors from the shadow
+ ErrorLock sync.Mutex
+}
+
+type shadowResourceProviderShared struct {
+ // NOTE: Anytime a value is added here, be sure to add it to
+ // the Close() method so that it is closed.
+
+ CloseErr shadow.Value
+ Input shadow.Value
+ Validate shadow.Value
+ Configure shadow.Value
+ ValidateResource shadow.KeyedValue
+ Apply shadow.KeyedValue
+ Diff shadow.KeyedValue
+ Refresh shadow.KeyedValue
+ ValidateDataSource shadow.KeyedValue
+ ReadDataDiff shadow.KeyedValue
+ ReadDataApply shadow.KeyedValue
+}
+
+func (p *shadowResourceProviderShared) Close() error {
+ return shadow.Close(p)
+}
+
+func (p *shadowResourceProviderShadow) CloseShadow() error {
+ err := p.Shared.Close()
+ if err != nil {
+ err = fmt.Errorf("close error: %s", err)
+ }
+
+ return err
+}
+
+func (p *shadowResourceProviderShadow) ShadowError() error {
+ return p.Error
+}
+
+func (p *shadowResourceProviderShadow) Resources() []ResourceType {
+ return p.resources
+}
+
+func (p *shadowResourceProviderShadow) DataSources() []DataSource {
+ return p.dataSources
+}
+
+func (p *shadowResourceProviderShadow) Close() error {
+ v := p.Shared.CloseErr.Value()
+ if v == nil {
+ return nil
+ }
+
+ return v.(error)
+}
+
+func (p *shadowResourceProviderShadow) Input(
+ input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
+ // Get the result of the input call
+ raw := p.Shared.Input.Value()
+ if raw == nil {
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderInput)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'input' shadow value: %#v", raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !c.Equal(result.Config) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Input had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
+ result.Config, c))
+ p.ErrorLock.Unlock()
+ }
+
+ // Return the results
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) Validate(c *ResourceConfig) ([]string, []error) {
+ // Get the result of the validate call
+ raw := p.Shared.Validate.Value()
+ if raw == nil {
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderValidate)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'validate' shadow value: %#v", raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !c.Equal(result.Config) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Validate had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
+ result.Config, c))
+ p.ErrorLock.Unlock()
+ }
+
+ // Return the results
+ return result.ResultWarn, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) Configure(c *ResourceConfig) error {
+ // Get the result of the call
+ raw := p.Shared.Configure.Value()
+ if raw == nil {
+ return nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderConfigure)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'configure' shadow value: %#v", raw))
+ return nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !c.Equal(result.Config) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Configure had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
+ result.Config, c))
+ p.ErrorLock.Unlock()
+ }
+
+ // Return the results
+ return result.Result
+}
+
+// Stop returns immediately.
+func (p *shadowResourceProviderShadow) Stop() error {
+ return nil
+}
+
+func (p *shadowResourceProviderShadow) ValidateResource(t string, c *ResourceConfig) ([]string, []error) {
+ // Unique key
+ key := t
+
+ // Get the initial value
+ raw := p.Shared.ValidateResource.Value(key)
+
+ // Find a validation with our configuration
+ var result *shadowResourceProviderValidateResource
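+	// Note: validations are keyed only by type, and the same type may be
+	// validated many times with different configs. The loop below reads the
+	// current wrapper, scans its recorded calls for one whose config matches
+	// ours, and otherwise blocks in WaitForChange until the real side
+	// records another call.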
+ for {
+ // Get the value
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ValidateResource' call for %q:\n\n%#v",
+ key, c))
+ return nil, nil
+ }
+
+ wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ValidateResource' shadow value for %q: %#v", key, raw))
+ return nil, nil
+ }
+
+ // Look for the matching call with our configuration
+ wrapper.RLock()
+ for _, call := range wrapper.Calls {
+ if call.Config.Equal(c) {
+ result = call
+ break
+ }
+ }
+ wrapper.RUnlock()
+
+ // If we found a result, exit
+ if result != nil {
+ break
+ }
+
+ // Wait for a change so we can get the wrapper again
+ raw = p.Shared.ValidateResource.WaitForChange(key)
+ }
+
+ return result.Warns, result.Errors
+}
+
+func (p *shadowResourceProviderShadow) Apply(
+ info *InstanceInfo,
+ state *InstanceState,
+ diff *InstanceDiff) (*InstanceState, error) {
+ // Unique key
+ key := info.uniqueId()
+ raw := p.Shared.Apply.Value(key)
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'apply' call for %q:\n\n%#v\n\n%#v",
+ key, state, diff))
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderApply)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'apply' shadow value for %q: %#v", key, raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !state.Equal(result.State) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Apply %q: state had unequal states (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.State, state))
+ p.ErrorLock.Unlock()
+ }
+
+ if !diff.Equal(result.Diff) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Apply %q: unequal diffs (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.Diff, diff))
+ p.ErrorLock.Unlock()
+ }
+
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) Diff(
+ info *InstanceInfo,
+ state *InstanceState,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ // Unique key
+ key := info.uniqueId()
+ raw := p.Shared.Diff.Value(key)
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'diff' call for %q:\n\n%#v\n\n%#v",
+ key, state, desired))
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderDiff)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'diff' shadow value for %q: %#v", key, raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !state.Equal(result.State) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.State, state))
+ p.ErrorLock.Unlock()
+ }
+ if !desired.Equal(result.Desired) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.Desired, desired))
+ p.ErrorLock.Unlock()
+ }
+
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) Refresh(
+ info *InstanceInfo,
+ state *InstanceState) (*InstanceState, error) {
+ // Unique key
+ key := info.uniqueId()
+ raw := p.Shared.Refresh.Value(key)
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'refresh' call for %q:\n\n%#v",
+ key, state))
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderRefresh)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'refresh' shadow value: %#v", raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !state.Equal(result.State) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Refresh %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.State, state))
+ p.ErrorLock.Unlock()
+ }
+
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) ValidateDataSource(
+ t string, c *ResourceConfig) ([]string, []error) {
+ // Unique key
+ key := t
+
+ // Get the initial value
+ raw := p.Shared.ValidateDataSource.Value(key)
+
+ // Find a validation with our configuration
+ var result *shadowResourceProviderValidateDataSource
+ for {
+ // Get the value
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ValidateDataSource' call for %q:\n\n%#v",
+ key, c))
+ return nil, nil
+ }
+
+ wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ValidateDataSource' shadow value: %#v", raw))
+ return nil, nil
+ }
+
+ // Look for the matching call with our configuration
+ wrapper.RLock()
+ for _, call := range wrapper.Calls {
+ if call.Config.Equal(c) {
+ result = call
+ break
+ }
+ }
+ wrapper.RUnlock()
+
+ // If we found a result, exit
+ if result != nil {
+ break
+ }
+
+ // Wait for a change so we can get the wrapper again
+ raw = p.Shared.ValidateDataSource.WaitForChange(key)
+ }
+
+ return result.Warns, result.Errors
+}
+
+func (p *shadowResourceProviderShadow) ReadDataDiff(
+ info *InstanceInfo,
+ desired *ResourceConfig) (*InstanceDiff, error) {
+ // Unique key
+ key := info.uniqueId()
+ raw := p.Shared.ReadDataDiff.Value(key)
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ReadDataDiff' call for %q:\n\n%#v",
+ key, desired))
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderReadDataDiff)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ReadDataDiff' shadow value for %q: %#v", key, raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !desired.Equal(result.Desired) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "ReadDataDiff %q had unequal configs (real, then shadow):\n\n%#v\n\n%#v",
+ key, result.Desired, desired))
+ p.ErrorLock.Unlock()
+ }
+
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) ReadDataApply(
+ info *InstanceInfo,
+ d *InstanceDiff) (*InstanceState, error) {
+ // Unique key
+ key := info.uniqueId()
+ raw := p.Shared.ReadDataApply.Value(key)
+ if raw == nil {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ReadDataApply' call for %q:\n\n%#v",
+ key, d))
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProviderReadDataApply)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'ReadDataApply' shadow value for %q: %#v", key, raw))
+ return nil, nil
+ }
+
+ // Compare the parameters, which should be identical
+ if !d.Equal(result.Diff) {
+ p.ErrorLock.Lock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "ReadDataApply: unequal diffs (real, then shadow):\n\n%#v\n\n%#v",
+ result.Diff, d))
+ p.ErrorLock.Unlock()
+ }
+
+ return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) {
+ panic("import not supported by shadow graph")
+}
+
+// The structs for the various function calls are put below. These structs
+// are used to carry call information across the real/shadow boundaries.
+
+type shadowResourceProviderInput struct {
+ Config *ResourceConfig
+ Result *ResourceConfig
+ ResultErr error
+}
+
+type shadowResourceProviderValidate struct {
+ Config *ResourceConfig
+ ResultWarn []string
+ ResultErr []error
+}
+
+type shadowResourceProviderConfigure struct {
+ Config *ResourceConfig
+ Result error
+}
+
+type shadowResourceProviderValidateResourceWrapper struct {
+ sync.RWMutex
+
+ Calls []*shadowResourceProviderValidateResource
+}
+
+type shadowResourceProviderValidateResource struct {
+ Config *ResourceConfig
+ Warns []string
+ Errors []error
+}
+
+type shadowResourceProviderApply struct {
+ State *InstanceState
+ Diff *InstanceDiff
+ Result *InstanceState
+ ResultErr error
+}
+
+type shadowResourceProviderDiff struct {
+ State *InstanceState
+ Desired *ResourceConfig
+ Result *InstanceDiff
+ ResultErr error
+}
+
+type shadowResourceProviderRefresh struct {
+ State *InstanceState
+ Result *InstanceState
+ ResultErr error
+}
+
+type shadowResourceProviderValidateDataSourceWrapper struct {
+ sync.RWMutex
+
+ Calls []*shadowResourceProviderValidateDataSource
+}
+
+type shadowResourceProviderValidateDataSource struct {
+ Config *ResourceConfig
+ Warns []string
+ Errors []error
+}
+
+type shadowResourceProviderReadDataDiff struct {
+ Desired *ResourceConfig
+ Result *InstanceDiff
+ ResultErr error
+}
+
+type shadowResourceProviderReadDataApply struct {
+ Diff *InstanceDiff
+ Result *InstanceState
+ ResultErr error
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go
new file mode 100644
index 00000000..60a49088
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go
@@ -0,0 +1,282 @@
+package terraform
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/helper/shadow"
+)
+
+// shadowResourceProvisioner implements ResourceProvisioner for the shadow
+// eval context defined in eval_context_shadow.go.
+//
+// This is used to verify behavior with a real provisioner. This shouldn't
+// be used directly.
+type shadowResourceProvisioner interface {
+ ResourceProvisioner
+ Shadow
+}
+
+// newShadowResourceProvisioner creates a new shadowed ResourceProvisioner.
+func newShadowResourceProvisioner(
+ p ResourceProvisioner) (ResourceProvisioner, shadowResourceProvisioner) {
+ // Create the shared data
+ shared := shadowResourceProvisionerShared{
+ Validate: shadow.ComparedValue{
+ Func: shadowResourceProvisionerValidateCompare,
+ },
+ }
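+	// Validate results are keyed by the config value itself (via the
+	// compare func above) rather than by a name, so the shadow side can
+	// look up a recorded result by presenting an equal config.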
+
+ // Create the real provisioner that does actual work
+ real := &shadowResourceProvisionerReal{
+ ResourceProvisioner: p,
+ Shared: &shared,
+ }
+
+ // Create the shadow that watches the real value
+ shadow := &shadowResourceProvisionerShadow{
+ Shared: &shared,
+ }
+
+ return real, shadow
+}
+
+// shadowResourceProvisionerReal is the real resource provisioner. Function calls
+// to this will perform real work. This records the parameters and return
+// values and call order for the shadow to reproduce.
+type shadowResourceProvisionerReal struct {
+ ResourceProvisioner
+
+ Shared *shadowResourceProvisionerShared
+}
+
+func (p *shadowResourceProvisionerReal) Close() error {
+ var result error
+ if c, ok := p.ResourceProvisioner.(ResourceProvisionerCloser); ok {
+ result = c.Close()
+ }
+
+ p.Shared.CloseErr.SetValue(result)
+ return result
+}
+
+func (p *shadowResourceProvisionerReal) Validate(c *ResourceConfig) ([]string, []error) {
+ warns, errs := p.ResourceProvisioner.Validate(c)
+ p.Shared.Validate.SetValue(&shadowResourceProvisionerValidate{
+ Config: c,
+ ResultWarn: warns,
+ ResultErr: errs,
+ })
+
+ return warns, errs
+}
+
+func (p *shadowResourceProvisionerReal) Apply(
+ output UIOutput, s *InstanceState, c *ResourceConfig) error {
+ err := p.ResourceProvisioner.Apply(output, s, c)
+
+	// To write the result, grab the write lock. This should never block
+	// for long since the operations below don't block.
+ p.Shared.ApplyLock.Lock()
+ defer p.Shared.ApplyLock.Unlock()
+
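+	// Results are stored two levels deep: Shared.Apply is keyed by the
+	// instance ID, and each entry is a shadow.ComparedValue keyed by the
+	// resource config, since the same instance may be provisioned more
+	// than once with different configurations.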
+ key := s.ID
+ raw, ok := p.Shared.Apply.ValueOk(key)
+ if !ok {
+ // Setup a new value
+ raw = &shadow.ComparedValue{
+ Func: shadowResourceProvisionerApplyCompare,
+ }
+
+ // Set it
+ p.Shared.Apply.SetValue(key, raw)
+ }
+
+ compareVal, ok := raw.(*shadow.ComparedValue)
+ if !ok {
+ // Just log and return so that we don't cause the real side
+ // any side effects.
+ log.Printf("[ERROR] unknown value in 'apply': %#v", raw)
+ return err
+ }
+
+ // Write the resulting value
+ compareVal.SetValue(&shadowResourceProvisionerApply{
+ Config: c,
+ ResultErr: err,
+ })
+
+ return err
+}
+
+func (p *shadowResourceProvisionerReal) Stop() error {
+ return p.ResourceProvisioner.Stop()
+}
+
+// shadowResourceProvisionerShadow is the shadow resource provisioner. Function
+// calls never affect real resources. This is paired with the "real" side
+// which must be called properly to enable recording.
+type shadowResourceProvisionerShadow struct {
+ Shared *shadowResourceProvisionerShared
+
+ Error error // Error is the list of errors from the shadow
+ ErrorLock sync.Mutex
+}
+
+type shadowResourceProvisionerShared struct {
+ // NOTE: Anytime a value is added here, be sure to add it to
+ // the Close() method so that it is closed.
+
+ CloseErr shadow.Value
+ Validate shadow.ComparedValue
+ Apply shadow.KeyedValue
+ ApplyLock sync.Mutex // For writing only
+}
+
+func (p *shadowResourceProvisionerShared) Close() error {
+ closers := []io.Closer{
+ &p.CloseErr,
+ }
+
+ for _, c := range closers {
+ // This should never happen, but we don't panic because a panic
+ // could affect the real behavior of Terraform and a shadow should
+ // never be able to do that.
+ if err := c.Close(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (p *shadowResourceProvisionerShadow) CloseShadow() error {
+ err := p.Shared.Close()
+ if err != nil {
+ err = fmt.Errorf("close error: %s", err)
+ }
+
+ return err
+}
+
+func (p *shadowResourceProvisionerShadow) ShadowError() error {
+ return p.Error
+}
+
+func (p *shadowResourceProvisionerShadow) Close() error {
+ v := p.Shared.CloseErr.Value()
+ if v == nil {
+ return nil
+ }
+
+ return v.(error)
+}
+
+func (p *shadowResourceProvisionerShadow) Validate(c *ResourceConfig) ([]string, []error) {
+ // Get the result of the validate call
+ raw := p.Shared.Validate.Value(c)
+ if raw == nil {
+ return nil, nil
+ }
+
+ result, ok := raw.(*shadowResourceProvisionerValidate)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'validate' shadow value: %#v", raw))
+ return nil, nil
+ }
+
+	// We don't need to compare configurations because we key on the
+	// configuration, so just return right away.
+ return result.ResultWarn, result.ResultErr
+}
+
+func (p *shadowResourceProvisionerShadow) Apply(
+ output UIOutput, s *InstanceState, c *ResourceConfig) error {
+ // Get the value based on the key
+ key := s.ID
+ raw := p.Shared.Apply.Value(key)
+ if raw == nil {
+ return nil
+ }
+
+ compareVal, ok := raw.(*shadow.ComparedValue)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'apply' shadow value: %#v", raw))
+ return nil
+ }
+
+ // With the compared value, we compare against our config
+ raw = compareVal.Value(c)
+ if raw == nil {
+ return nil
+ }
+
+ result, ok := raw.(*shadowResourceProvisionerApply)
+ if !ok {
+ p.ErrorLock.Lock()
+ defer p.ErrorLock.Unlock()
+ p.Error = multierror.Append(p.Error, fmt.Errorf(
+ "Unknown 'apply' shadow value: %#v", raw))
+ return nil
+ }
+
+ return result.ResultErr
+}
+
+func (p *shadowResourceProvisionerShadow) Stop() error {
+ // For the shadow, we always just return nil since a Stop indicates
+ // that we were interrupted and shadows are disabled during interrupts
+ // anyways.
+ return nil
+}
+
+// The structs for the various function calls are put below. These structs
+// are used to carry call information across the real/shadow boundaries.
+
+type shadowResourceProvisionerValidate struct {
+ Config *ResourceConfig
+ ResultWarn []string
+ ResultErr []error
+}
+
+type shadowResourceProvisionerApply struct {
+ Config *ResourceConfig
+ ResultErr error
+}
+
+func shadowResourceProvisionerValidateCompare(k, v interface{}) bool {
+ c, ok := k.(*ResourceConfig)
+ if !ok {
+ return false
+ }
+
+ result, ok := v.(*shadowResourceProvisionerValidate)
+ if !ok {
+ return false
+ }
+
+ return c.Equal(result.Config)
+}
+
+func shadowResourceProvisionerApplyCompare(k, v interface{}) bool {
+ c, ok := k.(*ResourceConfig)
+ if !ok {
+ return false
+ }
+
+ result, ok := v.(*shadowResourceProvisionerApply)
+ if !ok {
+ return false
+ }
+
+ return c.Equal(result.Config)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go
new file mode 100644
index 00000000..074b6824
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state.go
@@ -0,0 +1,2118 @@
+package terraform
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/go-version"
+ "github.com/hashicorp/terraform/config"
+ "github.com/mitchellh/copystructure"
+ "github.com/satori/go.uuid"
+)
+
+const (
+ // StateVersion is the current version for our state file
+ StateVersion = 3
+)
+
+// rootModulePath is the path of the root module
+var rootModulePath = []string{"root"}
+
+// normalizeModulePath takes a raw module path and returns a path that
+// has the rootModulePath prepended to it. If I could go back in time I
+// would've never had a rootModulePath (empty path would be root). We can
+// still fix this, but that's a big refactor that doesn't make sense for
+// this branch. Instead, this function normalizes paths.
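+//
+// For example (illustrative):
+//
+//	normalizeModulePath(nil)               // -> []string{"root"}
+//	normalizeModulePath([]string{"child"}) // -> []string{"root", "child"}
+//	normalizeModulePath([]string{"root"})  // -> []string{"root"} (unchanged)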
+func normalizeModulePath(p []string) []string {
+ k := len(rootModulePath)
+
+ // If we already have a root module prefix, we're done
+ if len(p) >= len(rootModulePath) {
+ if reflect.DeepEqual(p[:k], rootModulePath) {
+ return p
+ }
+ }
+
+ // None? Prefix it
+ result := make([]string, len(rootModulePath)+len(p))
+ copy(result, rootModulePath)
+ copy(result[k:], p)
+ return result
+}
+
+// State keeps track of a snapshot state-of-the-world that Terraform
+// can use to keep track of what real world resources it is actually
+// managing.
+type State struct {
+ // Version is the state file protocol version.
+ Version int `json:"version"`
+
+ // TFVersion is the version of Terraform that wrote this state.
+ TFVersion string `json:"terraform_version,omitempty"`
+
+ // Serial is incremented on any operation that modifies
+ // the State file. It is used to detect potentially conflicting
+ // updates.
+ Serial int64 `json:"serial"`
+
+ // Lineage is set when a new, blank state is created and then
+ // never updated. This allows us to determine whether the serials
+ // of two states can be meaningfully compared.
+ // Apart from the guarantee that collisions between two lineages
+ // are very unlikely, this value is opaque and external callers
+ // should only compare lineage strings byte-for-byte for equality.
+ Lineage string `json:"lineage"`
+
+ // Remote is used to track the metadata required to
+ // pull and push state files from a remote storage endpoint.
+ Remote *RemoteState `json:"remote,omitempty"`
+
+ // Backend tracks the configuration for the backend in use with
+ // this state. This is used to track any changes in the backend
+ // configuration.
+ Backend *BackendState `json:"backend,omitempty"`
+
+ // Modules contains all the modules in a breadth-first order
+ Modules []*ModuleState `json:"modules"`
+
+ mu sync.Mutex
+}
+
+func (s *State) Lock() { s.mu.Lock() }
+func (s *State) Unlock() { s.mu.Unlock() }
+
+// NewState is used to initialize a blank state
+func NewState() *State {
+ s := &State{}
+ s.init()
+ return s
+}
+
+// Children returns the ModuleStates that are direct children of
+// the given path. If the path is "root", for example, then children
+// returned might be "root.child", but not "root.child.grandchild".
+func (s *State) Children(path []string) []*ModuleState {
+ s.Lock()
+ defer s.Unlock()
+ // TODO: test
+
+ return s.children(path)
+}
+
+func (s *State) children(path []string) []*ModuleState {
+ result := make([]*ModuleState, 0)
+ for _, m := range s.Modules {
+ if m == nil {
+ continue
+ }
+
+ if len(m.Path) != len(path)+1 {
+ continue
+ }
+ if !reflect.DeepEqual(path, m.Path[:len(path)]) {
+ continue
+ }
+
+ result = append(result, m)
+ }
+
+ return result
+}
+
+// AddModule adds the module with the given path to the state.
+//
+// This should be the preferred method to add module states since it
+// allows us to optimize lookups later as well as control sorting.
+func (s *State) AddModule(path []string) *ModuleState {
+ s.Lock()
+ defer s.Unlock()
+
+ return s.addModule(path)
+}
+
+func (s *State) addModule(path []string) *ModuleState {
+ // check if the module exists first
+ m := s.moduleByPath(path)
+ if m != nil {
+ return m
+ }
+
+ m = &ModuleState{Path: path}
+ m.init()
+ s.Modules = append(s.Modules, m)
+ s.sort()
+ return m
+}
+
+// ModuleByPath is used to lookup the module state for the given path.
+// This should be the preferred lookup mechanism as it allows for future
+// lookup optimizations.
+func (s *State) ModuleByPath(path []string) *ModuleState {
+ if s == nil {
+ return nil
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ return s.moduleByPath(path)
+}
+
+func (s *State) moduleByPath(path []string) *ModuleState {
+ for _, mod := range s.Modules {
+ if mod == nil {
+ continue
+ }
+ if mod.Path == nil {
+ panic("missing module path")
+ }
+ if reflect.DeepEqual(mod.Path, path) {
+ return mod
+ }
+ }
+ return nil
+}
+
+// ModuleOrphans returns all the module orphans in this state by
+// returning their full paths. These paths can be used with ModuleByPath
+// to return the actual state.
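+//
+// For example (illustrative): with path ["root"], a state module
+// ["root","a"] that no longer appears in the config yields the orphan
+// ["root","a"]; a state containing only ["root","b","c"] (with no entry
+// for ["root","b"] itself, and none in the config) yields ["root","b"].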
+func (s *State) ModuleOrphans(path []string, c *config.Config) [][]string {
+ s.Lock()
+ defer s.Unlock()
+
+ return s.moduleOrphans(path, c)
+}
+
+func (s *State) moduleOrphans(path []string, c *config.Config) [][]string {
+ // direct keeps track of what direct children we have both in our config
+ // and in our state. childrenKeys keeps track of what isn't an orphan.
+ direct := make(map[string]struct{})
+ childrenKeys := make(map[string]struct{})
+ if c != nil {
+ for _, m := range c.Modules {
+ childrenKeys[m.Name] = struct{}{}
+ direct[m.Name] = struct{}{}
+ }
+ }
+
+ // Go over the direct children and find any that aren't in our keys.
+ var orphans [][]string
+ for _, m := range s.children(path) {
+ key := m.Path[len(m.Path)-1]
+
+ // Record that we found this key as a direct child. We use this
+ // later to find orphan nested modules.
+ direct[key] = struct{}{}
+
+ // If we have a direct child still in our config, it is not an orphan
+ if _, ok := childrenKeys[key]; ok {
+ continue
+ }
+
+ orphans = append(orphans, m.Path)
+ }
+
+	// Find the nested orphans: modules at least two levels below our path
+	// whose ancestor directly under our path is itself absent from both the
+	// config and the state.
+ for _, m := range s.Modules {
+ if m == nil {
+ continue
+ }
+
+ // We only want modules that are at least grandchildren
+ if len(m.Path) < len(path)+2 {
+ continue
+ }
+
+ // If it isn't part of our tree, continue
+ if !reflect.DeepEqual(path, m.Path[:len(path)]) {
+ continue
+ }
+
+ // If we have the direct child, then just skip it.
+ key := m.Path[len(path)]
+ if _, ok := direct[key]; ok {
+ continue
+ }
+
+ orphanPath := m.Path[:len(path)+1]
+
+ // Don't double-add if we've already added this orphan (which can happen if
+ // there are multiple nested sub-modules that get orphaned together).
+ alreadyAdded := false
+ for _, o := range orphans {
+ if reflect.DeepEqual(o, orphanPath) {
+ alreadyAdded = true
+ break
+ }
+ }
+ if alreadyAdded {
+ continue
+ }
+
+ // Add this orphan
+ orphans = append(orphans, orphanPath)
+ }
+
+ return orphans
+}
+
+// Empty returns true if the state is empty.
+func (s *State) Empty() bool {
+ if s == nil {
+ return true
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ return len(s.Modules) == 0
+}
+
+// HasResources returns true if the state contains any resources.
+//
+// This is similar to !s.Empty, but returns false also in the case where
+// the state has modules but all of them are devoid of resources.
+func (s *State) HasResources() bool {
+ if s.Empty() {
+ return false
+ }
+
+ for _, mod := range s.Modules {
+ if len(mod.Resources) > 0 {
+ return true
+ }
+ }
+
+ return false
+}
+
+// IsRemote returns true if State represents a state that exists and is
+// remote.
+func (s *State) IsRemote() bool {
+ if s == nil {
+ return false
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Remote == nil {
+ return false
+ }
+ if s.Remote.Type == "" {
+ return false
+ }
+
+ return true
+}
+
+// Validate validates the integrity of this state file.
+//
+// Certain properties of the statefile are expected by Terraform in order
+// to behave properly. The core of Terraform will assume that once it
+// receives a State structure, it has been validated. This validation
+// check should be called to ensure that.
+//
+// If this returns an error, then the user should be notified. The error
+// response will include detailed information on the nature of the error.
+func (s *State) Validate() error {
+ s.Lock()
+ defer s.Unlock()
+
+ var result error
+
+ // !!!! FOR DEVELOPERS !!!!
+ //
+ // Any errors returned from this Validate function will BLOCK TERRAFORM
+ // from loading a state file. Therefore, this should only contain checks
+ // that are only resolvable through manual intervention.
+ //
+ // !!!! FOR DEVELOPERS !!!!
+
+ // Make sure there are no duplicate module states. We open a new
+ // block here so we can use basic variable names and future validations
+ // can do the same.
+ {
+ found := make(map[string]struct{})
+ for _, ms := range s.Modules {
+ if ms == nil {
+ continue
+ }
+
+ key := strings.Join(ms.Path, ".")
+ if _, ok := found[key]; ok {
+ result = multierror.Append(result, fmt.Errorf(
+ strings.TrimSpace(stateValidateErrMultiModule), key))
+ continue
+ }
+
+ found[key] = struct{}{}
+ }
+ }
+
+ return result
+}
+
+// Remove removes the item in the state at the given address, returning
+// any errors that may have occurred.
+//
+// If the address references a module state or resource, it will delete
+// all children as well. To check what will be deleted, use a StateFilter
+// first.
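+//
+// For example (illustrative): Remove("module.foo") deletes that module's
+// state and everything under it, while Remove("aws_instance.web") deletes
+// that resource and its instances.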
+func (s *State) Remove(addr ...string) error {
+ s.Lock()
+ defer s.Unlock()
+
+ // Filter out what we need to delete
+ filter := &StateFilter{State: s}
+ results, err := filter.Filter(addr...)
+ if err != nil {
+ return err
+ }
+
+ // If we have no results, just exit early, we're not going to do anything.
+ // While what happens below is fairly fast, this is an important early
+ // exit since the prune below might modify the state more and we don't
+ // want to modify the state if we don't have to.
+ if len(results) == 0 {
+ return nil
+ }
+
+ // Go through each result and grab what we need
+ removed := make(map[interface{}]struct{})
+ for _, r := range results {
+ // Convert the path to our own type
+ path := append([]string{"root"}, r.Path...)
+
+ // If we removed this already, then ignore
+ if _, ok := removed[r.Value]; ok {
+ continue
+ }
+
+ // If we removed the parent already, then ignore
+ if r.Parent != nil {
+ if _, ok := removed[r.Parent.Value]; ok {
+ continue
+ }
+ }
+
+ // Add this to the removed list
+ removed[r.Value] = struct{}{}
+
+ switch v := r.Value.(type) {
+ case *ModuleState:
+ s.removeModule(path, v)
+ case *ResourceState:
+ s.removeResource(path, v)
+ case *InstanceState:
+ s.removeInstance(path, r.Parent.Value.(*ResourceState), v)
+ default:
+ return fmt.Errorf("unknown type to delete: %T", r.Value)
+ }
+ }
+
+ // Prune since the removal functions often do the bare minimum to
+ // remove a thing and may leave around dangling empty modules, resources,
+ // etc. Prune will clean that all up.
+ s.prune()
+
+ return nil
+}
+
+func (s *State) removeModule(path []string, v *ModuleState) {
+ for i, m := range s.Modules {
+ if m == v {
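+			// Delete while preserving order, then nil out the slot at the
+			// old tail so the backing array does not keep the module alive.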
+ s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil
+ return
+ }
+ }
+}
+
+func (s *State) removeResource(path []string, v *ResourceState) {
+ // Get the module this resource lives in. If it doesn't exist, we're done.
+ mod := s.moduleByPath(path)
+ if mod == nil {
+ return
+ }
+
+	// Find this resource. This is an O(N) lookup; with the key it could be
+	// O(1), but even with thousands of resources this shouldn't matter right
+	// now. We can easily improve performance here when the time comes.
+ for k, r := range mod.Resources {
+ if r == v {
+ // Found it
+ delete(mod.Resources, k)
+ return
+ }
+ }
+}
+
+func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) {
+ // Go through the resource and find the instance that matches this
+ // (if any) and remove it.
+
+ // Check primary
+ if r.Primary == v {
+ r.Primary = nil
+ return
+ }
+
+ // Check lists
+ lists := [][]*InstanceState{r.Deposed}
+ for _, is := range lists {
+ for i, instance := range is {
+ if instance == v {
+ // Found it, remove it
+ is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil
+
+ // Done
+ return
+ }
+ }
+ }
+}
+
+// RootModule returns the ModuleState for the root module
+func (s *State) RootModule() *ModuleState {
+ root := s.ModuleByPath(rootModulePath)
+ if root == nil {
+ panic("missing root module")
+ }
+ return root
+}
+
+// Equal tests if one state is equal to another.
+func (s *State) Equal(other *State) bool {
+ // If one is nil, we do a direct check
+ if s == nil || other == nil {
+ return s == other
+ }
+
+ s.Lock()
+ defer s.Unlock()
+ return s.equal(other)
+}
+
+func (s *State) equal(other *State) bool {
+ if s == nil || other == nil {
+ return s == other
+ }
+
+ // If the versions are different, they're certainly not equal
+ if s.Version != other.Version {
+ return false
+ }
+
+ // If any of the modules are not equal, then this state isn't equal
+ if len(s.Modules) != len(other.Modules) {
+ return false
+ }
+ for _, m := range s.Modules {
+ // This isn't very optimal currently but works.
+ otherM := other.moduleByPath(m.Path)
+ if otherM == nil {
+ return false
+ }
+
+ // If they're not equal, then we're not equal!
+ if !m.Equal(otherM) {
+ return false
+ }
+ }
+
+ return true
+}
+
+type StateAgeComparison int
+
+const (
+ StateAgeEqual StateAgeComparison = 0
+ StateAgeReceiverNewer StateAgeComparison = 1
+ StateAgeReceiverOlder StateAgeComparison = -1
+)
+
+// CompareAges compares one state with another for which is "older".
+//
+// This is a simple check using the state's serial, and is thus only as
+// reliable as the serial itself. In the normal case, only one state
+// exists for a given combination of lineage/serial, but Terraform
+// does not guarantee this and so the result of this method should be
+// used with care.
+//
+// Returns an integer that is negative if the receiver is older than
+// the argument, positive if the converse, and zero if they are equal.
+// An error is returned if the two states are not of the same lineage,
+// in which case the integer returned has no meaning.
+func (s *State) CompareAges(other *State) (StateAgeComparison, error) {
+ // nil states are "older" than actual states
+ switch {
+ case s != nil && other == nil:
+ return StateAgeReceiverNewer, nil
+ case s == nil && other != nil:
+ return StateAgeReceiverOlder, nil
+ case s == nil && other == nil:
+ return StateAgeEqual, nil
+ }
+
+ if !s.SameLineage(other) {
+ return StateAgeEqual, fmt.Errorf(
+ "can't compare two states of differing lineage",
+ )
+ }
+
+ s.Lock()
+ defer s.Unlock()
+
+ switch {
+ case s.Serial < other.Serial:
+ return StateAgeReceiverOlder, nil
+ case s.Serial > other.Serial:
+ return StateAgeReceiverNewer, nil
+ default:
+ return StateAgeEqual, nil
+ }
+}
+
+// SameLineage returns true only if the state given in argument belongs
+// to the same "lineage" of states as the receiver.
+func (s *State) SameLineage(other *State) bool {
+ s.Lock()
+ defer s.Unlock()
+
+ // If one of the states has no lineage then it is assumed to predate
+ // this concept, and so we'll accept it as belonging to any lineage
+ // so that a lineage string can be assigned to newer versions
+ // without breaking compatibility with older versions.
+ if s.Lineage == "" || other.Lineage == "" {
+ return true
+ }
+
+ return s.Lineage == other.Lineage
+}
+
+// DeepCopy performs a deep copy of the state structure and returns
+// a new structure.
+func (s *State) DeepCopy() *State {
+ copy, err := copystructure.Config{Lock: true}.Copy(s)
+ if err != nil {
+ panic(err)
+ }
+
+ return copy.(*State)
+}
+
+// IncrementSerialMaybe increments the serial number of this state
+// if it is different from the other state.
+func (s *State) IncrementSerialMaybe(other *State) {
+ if s == nil {
+ return
+ }
+ if other == nil {
+ return
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Serial > other.Serial {
+ return
+ }
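+	// If anything differs (version or content), take the larger of the
+	// two serials and increment past it to claim the next one.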
+ if other.TFVersion != s.TFVersion || !s.equal(other) {
+ if other.Serial > s.Serial {
+ s.Serial = other.Serial
+ }
+
+ s.Serial++
+ }
+}
+
+// FromFutureTerraform checks if this state was written by a Terraform
+// version from the future.
+func (s *State) FromFutureTerraform() bool {
+ s.Lock()
+ defer s.Unlock()
+
+ // No TF version means it is certainly from the past
+ if s.TFVersion == "" {
+ return false
+ }
+
+ v := version.Must(version.NewVersion(s.TFVersion))
+ return SemVersion.LessThan(v)
+}
+
+func (s *State) Init() {
+ s.Lock()
+ defer s.Unlock()
+ s.init()
+}
+
+func (s *State) init() {
+ if s.Version == 0 {
+ s.Version = StateVersion
+ }
+ if s.moduleByPath(rootModulePath) == nil {
+ s.addModule(rootModulePath)
+ }
+ s.ensureHasLineage()
+
+ for _, mod := range s.Modules {
+ if mod != nil {
+ mod.init()
+ }
+ }
+
+ if s.Remote != nil {
+ s.Remote.init()
+ }
+}
+
+func (s *State) EnsureHasLineage() {
+ s.Lock()
+ defer s.Unlock()
+
+ s.ensureHasLineage()
+}
+
+func (s *State) ensureHasLineage() {
+ if s.Lineage == "" {
+ s.Lineage = uuid.NewV4().String()
+ log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage)
+ } else {
+ log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage)
+ }
+}
+
+// AddModuleState inserts this module state, overriding any existing
+// ModuleState with the same path.
+func (s *State) AddModuleState(mod *ModuleState) {
+ mod.init()
+ s.Lock()
+ defer s.Unlock()
+
+ s.addModuleState(mod)
+}
+
+func (s *State) addModuleState(mod *ModuleState) {
+ for i, m := range s.Modules {
+ if reflect.DeepEqual(m.Path, mod.Path) {
+ s.Modules[i] = mod
+ return
+ }
+ }
+
+ s.Modules = append(s.Modules, mod)
+ s.sort()
+}
+
+// prune is used to remove any resources that are no longer required
+func (s *State) prune() {
+ if s == nil {
+ return
+ }
+
+ // Filter out empty modules.
+	// A module is always assumed to have a path, and its length isn't always
+ // bounds checked later on. Modules may be "emptied" during destroy, but we
+ // never want to store those in the state.
+ for i := 0; i < len(s.Modules); i++ {
+ if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 {
+ s.Modules = append(s.Modules[:i], s.Modules[i+1:]...)
+ i--
+ }
+ }
+
+ for _, mod := range s.Modules {
+ mod.prune()
+ }
+ if s.Remote != nil && s.Remote.Empty() {
+ s.Remote = nil
+ }
+}
+
+// sort sorts the modules
+func (s *State) sort() {
+ sort.Sort(moduleStateSort(s.Modules))
+
+ // Allow modules to be sorted
+ for _, m := range s.Modules {
+ if m != nil {
+ m.sort()
+ }
+ }
+}
+
+func (s *State) String() string {
+ if s == nil {
+ return "<nil>"
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ var buf bytes.Buffer
+ for _, m := range s.Modules {
+ mStr := m.String()
+
+ // If we're the root module, we just write the output directly.
+ if reflect.DeepEqual(m.Path, rootModulePath) {
+ buf.WriteString(mStr + "\n")
+ continue
+ }
+
+ buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], ".")))
+
+ s := bufio.NewScanner(strings.NewReader(mStr))
+ for s.Scan() {
+ text := s.Text()
+ if text != "" {
+ text = " " + text
+ }
+
+ buf.WriteString(fmt.Sprintf("%s\n", text))
+ }
+ }
+
+ return strings.TrimSpace(buf.String())
+}
+
+// BackendState stores the configuration to connect to a remote backend.
+type BackendState struct {
+ Type string `json:"type"` // Backend type
+ Config map[string]interface{} `json:"config"` // Backend raw config
+
+ // Hash is the hash code to uniquely identify the original source
+ // configuration. We use this to detect when there is a change in
+ // configuration even when "type" isn't changed.
+ Hash uint64 `json:"hash"`
+}
+
+// Empty returns true if BackendState has no state.
+func (s *BackendState) Empty() bool {
+ return s == nil || s.Type == ""
+}
+
+// Rehash returns a unique content hash for this backend's configuration
+// as a uint64 value.
+// The Hash stored in the backend state needs to match the config itself, but
+// we need to compare the backend config after it has been combined with all
+// options.
+// This function must match the implementation used by config.Backend.
+func (s *BackendState) Rehash() uint64 {
+ if s == nil {
+ return 0
+ }
+
+ cfg := config.Backend{
+ Type: s.Type,
+ RawConfig: &config.RawConfig{
+ Raw: s.Config,
+ },
+ }
+
+ return cfg.Rehash()
+}
+
+// RemoteState is used to track the information about a remote
+// state store that we push/pull state to.
+type RemoteState struct {
+ // Type controls the client we use for the remote state
+ Type string `json:"type"`
+
+ // Config is used to store arbitrary configuration that
+ // is type specific
+ Config map[string]string `json:"config"`
+
+ mu sync.Mutex
+}
+
+func (s *RemoteState) Lock() { s.mu.Lock() }
+func (s *RemoteState) Unlock() { s.mu.Unlock() }
+
+func (r *RemoteState) init() {
+ r.Lock()
+ defer r.Unlock()
+
+ if r.Config == nil {
+ r.Config = make(map[string]string)
+ }
+}
+
+func (r *RemoteState) deepcopy() *RemoteState {
+ r.Lock()
+ defer r.Unlock()
+
+ confCopy := make(map[string]string, len(r.Config))
+ for k, v := range r.Config {
+ confCopy[k] = v
+ }
+ return &RemoteState{
+ Type: r.Type,
+ Config: confCopy,
+ }
+}
+
+func (r *RemoteState) Empty() bool {
+ if r == nil {
+ return true
+ }
+ r.Lock()
+ defer r.Unlock()
+
+ return r.Type == ""
+}
+
+func (r *RemoteState) Equals(other *RemoteState) bool {
+ r.Lock()
+ defer r.Unlock()
+
+ if r.Type != other.Type {
+ return false
+ }
+ if len(r.Config) != len(other.Config) {
+ return false
+ }
+ for k, v := range r.Config {
+ if other.Config[k] != v {
+ return false
+ }
+ }
+ return true
+}
+
+// OutputState is used to track the state relevant to a single output.
+type OutputState struct {
+ // Sensitive describes whether the output is considered sensitive,
+ // which may lead to masking the value on screen in some cases.
+ Sensitive bool `json:"sensitive"`
+ // Type describes the structure of Value. Valid values are "string",
+ // "map" and "list"
+ Type string `json:"type"`
+ // Value contains the value of the output, in the structure described
+ // by the Type field.
+ Value interface{} `json:"value"`
+
+ mu sync.Mutex
+}
+
+func (s *OutputState) Lock() { s.mu.Lock() }
+func (s *OutputState) Unlock() { s.mu.Unlock() }
+
+func (s *OutputState) String() string {
+ return fmt.Sprintf("%#v", s.Value)
+}
+
+// Equal compares two OutputState structures for equality. nil values are
+// considered equal.
+func (s *OutputState) Equal(other *OutputState) bool {
+ if s == nil && other == nil {
+ return true
+ }
+
+ if s == nil || other == nil {
+ return false
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Type != other.Type {
+ return false
+ }
+
+ if s.Sensitive != other.Sensitive {
+ return false
+ }
+
+ if !reflect.DeepEqual(s.Value, other.Value) {
+ return false
+ }
+
+ return true
+}
+
+func (s *OutputState) deepcopy() *OutputState {
+ if s == nil {
+ return nil
+ }
+
+ stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
+ if err != nil {
+ panic(fmt.Errorf("Error copying output value: %s", err))
+ }
+
+ return stateCopy.(*OutputState)
+}
+
+// ModuleState is used to track all the state relevant to a single
+// module. Previous to Terraform 0.3, all state belonged to the "root"
+// module.
+type ModuleState struct {
+	// Path is the import path from the root module. Module imports are
+	// always disjoint, so the path represents a module tree.
+ Path []string `json:"path"`
+
+ // Outputs declared by the module and maintained for each module
+ // even though only the root module technically needs to be kept.
+ // This allows operators to inspect values at the boundaries.
+ Outputs map[string]*OutputState `json:"outputs"`
+
+ // Resources is a mapping of the logically named resource to
+ // the state of the resource. Each resource may actually have
+ // N instances underneath, although a user only needs to think
+ // about the 1:1 case.
+ Resources map[string]*ResourceState `json:"resources"`
+
+ // Dependencies are a list of things that this module relies on
+	// existing to remain intact. For example: a module may depend
+ // on a VPC ID given by an aws_vpc resource.
+ //
+ // Terraform uses this information to build valid destruction
+ // orders and to warn the user if they're destroying a module that
+ // another resource depends on.
+ //
+ // Things can be put into this list that may not be managed by
+ // Terraform. If Terraform doesn't find a matching ID in the
+ // overall state, then it assumes it isn't managed and doesn't
+ // worry about it.
+ Dependencies []string `json:"depends_on"`
+
+ mu sync.Mutex
+}
+
+func (s *ModuleState) Lock() { s.mu.Lock() }
+func (s *ModuleState) Unlock() { s.mu.Unlock() }
+
+// Equal tests whether one module state is equal to another.
+func (m *ModuleState) Equal(other *ModuleState) bool {
+ m.Lock()
+ defer m.Unlock()
+
+ // Paths must be equal
+ if !reflect.DeepEqual(m.Path, other.Path) {
+ return false
+ }
+
+ // Outputs must be equal
+ if len(m.Outputs) != len(other.Outputs) {
+ return false
+ }
+ for k, v := range m.Outputs {
+ if !other.Outputs[k].Equal(v) {
+ return false
+ }
+ }
+
+ // Dependencies must be equal. This sorts these in place but
+ // this shouldn't cause any problems.
+ sort.Strings(m.Dependencies)
+ sort.Strings(other.Dependencies)
+ if len(m.Dependencies) != len(other.Dependencies) {
+ return false
+ }
+ for i, d := range m.Dependencies {
+ if other.Dependencies[i] != d {
+ return false
+ }
+ }
+
+ // Resources must be equal
+ if len(m.Resources) != len(other.Resources) {
+ return false
+ }
+ for k, r := range m.Resources {
+ otherR, ok := other.Resources[k]
+ if !ok {
+ return false
+ }
+
+ if !r.Equal(otherR) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// IsRoot says whether or not this module state is for the root module.
+func (m *ModuleState) IsRoot() bool {
+ m.Lock()
+ defer m.Unlock()
+ return reflect.DeepEqual(m.Path, rootModulePath)
+}
+
+// IsDescendent returns true if other is a descendent of this module.
+func (m *ModuleState) IsDescendent(other *ModuleState) bool {
+ m.Lock()
+ defer m.Unlock()
+
+ i := len(m.Path)
+ return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path)
+}
+
+// Orphans returns a list of keys of resources that are in the State
+// but aren't present in the configuration itself. Hence, these keys
+// represent the state of resources that are orphans.
+func (m *ModuleState) Orphans(c *config.Config) []string {
+ m.Lock()
+ defer m.Unlock()
+
+ keys := make(map[string]struct{})
+	for k := range m.Resources {
+ keys[k] = struct{}{}
+ }
+
+ if c != nil {
+ for _, r := range c.Resources {
+ delete(keys, r.Id())
+
+			for k := range keys {
+ if strings.HasPrefix(k, r.Id()+".") {
+ delete(keys, k)
+ }
+ }
+ }
+ }
+
+ result := make([]string, 0, len(keys))
+	for k := range keys {
+ result = append(result, k)
+ }
+
+ return result
+}
+
+// View returns a view with the given resource prefix.
+func (m *ModuleState) View(id string) *ModuleState {
+ if m == nil {
+ return m
+ }
+
+ r := m.deepcopy()
+	for k := range r.Resources {
+ if id == k || strings.HasPrefix(k, id+".") {
+ continue
+ }
+
+ delete(r.Resources, k)
+ }
+
+ return r
+}
+
+func (m *ModuleState) init() {
+ m.Lock()
+ defer m.Unlock()
+
+ if m.Path == nil {
+ m.Path = []string{}
+ }
+ if m.Outputs == nil {
+ m.Outputs = make(map[string]*OutputState)
+ }
+ if m.Resources == nil {
+ m.Resources = make(map[string]*ResourceState)
+ }
+
+ if m.Dependencies == nil {
+ m.Dependencies = make([]string, 0)
+ }
+
+ for _, rs := range m.Resources {
+ rs.init()
+ }
+}
+
+func (m *ModuleState) deepcopy() *ModuleState {
+ if m == nil {
+ return nil
+ }
+
+ stateCopy, err := copystructure.Config{Lock: true}.Copy(m)
+ if err != nil {
+ panic(err)
+ }
+
+ return stateCopy.(*ModuleState)
+}
+
+// prune is used to remove any resources that are no longer required
+func (m *ModuleState) prune() {
+ m.Lock()
+ defer m.Unlock()
+
+ for k, v := range m.Resources {
+ if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 {
+ delete(m.Resources, k)
+ continue
+ }
+
+ v.prune()
+ }
+
+ for k, v := range m.Outputs {
+ if v.Value == config.UnknownVariableValue {
+ delete(m.Outputs, k)
+ }
+ }
+
+ m.Dependencies = uniqueStrings(m.Dependencies)
+}
+
+func (m *ModuleState) sort() {
+ for _, v := range m.Resources {
+ v.sort()
+ }
+}
+
+func (m *ModuleState) String() string {
+ m.Lock()
+ defer m.Unlock()
+
+ var buf bytes.Buffer
+
+ if len(m.Resources) == 0 {
+ buf.WriteString("<no state>")
+ }
+
+ names := make([]string, 0, len(m.Resources))
+	for name := range m.Resources {
+ names = append(names, name)
+ }
+
+ sort.Sort(resourceNameSort(names))
+
+ for _, k := range names {
+ rs := m.Resources[k]
+ var id string
+ if rs.Primary != nil {
+ id = rs.Primary.ID
+ }
+ if id == "" {
+ id = "<not created>"
+ }
+
+ taintStr := ""
+ if rs.Primary.Tainted {
+ taintStr = " (tainted)"
+ }
+
+ deposedStr := ""
+ if len(rs.Deposed) > 0 {
+ deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed))
+ }
+
+ buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr))
+ buf.WriteString(fmt.Sprintf(" ID = %s\n", id))
+ if rs.Provider != "" {
+ buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider))
+ }
+
+ var attributes map[string]string
+ if rs.Primary != nil {
+ attributes = rs.Primary.Attributes
+ }
+ attrKeys := make([]string, 0, len(attributes))
+		for ak := range attributes {
+ if ak == "id" {
+ continue
+ }
+
+ attrKeys = append(attrKeys, ak)
+ }
+
+ sort.Strings(attrKeys)
+
+ for _, ak := range attrKeys {
+ av := attributes[ak]
+ buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av))
+ }
+
+ for idx, t := range rs.Deposed {
+ taintStr := ""
+ if t.Tainted {
+ taintStr = " (tainted)"
+ }
+ buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr))
+ }
+
+ if len(rs.Dependencies) > 0 {
+ buf.WriteString(fmt.Sprintf("\n Dependencies:\n"))
+ for _, dep := range rs.Dependencies {
+ buf.WriteString(fmt.Sprintf(" %s\n", dep))
+ }
+ }
+ }
+
+ if len(m.Outputs) > 0 {
+ buf.WriteString("\nOutputs:\n\n")
+
+ ks := make([]string, 0, len(m.Outputs))
+		for k := range m.Outputs {
+ ks = append(ks, k)
+ }
+
+ sort.Strings(ks)
+
+ for _, k := range ks {
+ v := m.Outputs[k]
+ switch vTyped := v.Value.(type) {
+ case string:
+ buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
+ case []interface{}:
+ buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
+ case map[string]interface{}:
+ var mapKeys []string
+				for key := range vTyped {
+ mapKeys = append(mapKeys, key)
+ }
+ sort.Strings(mapKeys)
+
+ var mapBuf bytes.Buffer
+ mapBuf.WriteString("{")
+ for _, key := range mapKeys {
+ mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key]))
+ }
+ mapBuf.WriteString("}")
+
+ buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String()))
+ }
+ }
+ }
+
+ return buf.String()
+}
+
+// ResourceStateKey is a structured representation of the key used for the
+// ModuleState.Resources mapping
+type ResourceStateKey struct {
+ Name string
+ Type string
+ Mode config.ResourceMode
+ Index int
+}
+
+// Equal determines whether two ResourceStateKeys are the same
+func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool {
+ if rsk == nil || other == nil {
+ return false
+ }
+ if rsk.Mode != other.Mode {
+ return false
+ }
+ if rsk.Type != other.Type {
+ return false
+ }
+ if rsk.Name != other.Name {
+ return false
+ }
+ if rsk.Index != other.Index {
+ return false
+ }
+ return true
+}
+
+func (rsk *ResourceStateKey) String() string {
+ if rsk == nil {
+ return ""
+ }
+ var prefix string
+ switch rsk.Mode {
+ case config.ManagedResourceMode:
+ prefix = ""
+ case config.DataResourceMode:
+ prefix = "data."
+ default:
+ panic(fmt.Errorf("unknown resource mode %s", rsk.Mode))
+ }
+ if rsk.Index == -1 {
+ return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name)
+ }
+ return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index)
+}
+
+// ParseResourceStateKey accepts a key in the format used by
+// ModuleState.Resources and returns a structured ResourceStateKey. In the
+// state, a resource has the format "type.name.index" or "type.name". In the
+// latter case, the index is returned as -1.
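+//
+// For example (illustrative):
+//
+//	ParseResourceStateKey("aws_instance.web")   // Type "aws_instance", Name "web", Index -1
+//	ParseResourceStateKey("aws_instance.web.3") // as above, but Index 3
+//	ParseResourceStateKey("data.aws_ami.base")  // DataResourceMode, Type "aws_ami", Name "base"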
+func ParseResourceStateKey(k string) (*ResourceStateKey, error) {
+ parts := strings.Split(k, ".")
+ mode := config.ManagedResourceMode
+ if len(parts) > 0 && parts[0] == "data" {
+ mode = config.DataResourceMode
+ // Don't need the constant "data" prefix for parsing
+ // now that we've figured out the mode.
+ parts = parts[1:]
+ }
+ if len(parts) < 2 || len(parts) > 3 {
+ return nil, fmt.Errorf("Malformed resource state key: %s", k)
+ }
+ rsk := &ResourceStateKey{
+ Mode: mode,
+ Type: parts[0],
+ Name: parts[1],
+ Index: -1,
+ }
+ if len(parts) == 3 {
+ index, err := strconv.Atoi(parts[2])
+ if err != nil {
+ return nil, fmt.Errorf("Malformed resource state key index: %s", k)
+ }
+ rsk.Index = index
+ }
+ return rsk, nil
+}
+
+// ResourceState holds the state of a resource that is used so that
+// a provider can find and manage an existing resource as well as for
+// storing attributes that are used to populate variables of child
+// resources.
+//
+// Attributes has attributes about the created resource that are
+// queryable in interpolation: "${type.name.attr}"
+//
+// Extra is just extra data that a provider can return that we store
+// for later, but is not exposed in any way to the user.
+type ResourceState struct {
+ // This is filled in and managed by Terraform, and is the resource
+ // type itself such as "mycloud_instance". If a resource provider sets
+ // this value, it won't be persisted.
+ Type string `json:"type"`
+
+ // Dependencies are a list of things that this resource relies on
+ // existing to remain intact. For example: an AWS instance might
+ // depend on a subnet (which itself might depend on a VPC, and so
+ // on).
+ //
+ // Terraform uses this information to build valid destruction
+ // orders and to warn the user if they're destroying a resource that
+ // another resource depends on.
+ //
+ // Things can be put into this list that may not be managed by
+ // Terraform. If Terraform doesn't find a matching ID in the
+ // overall state, then it assumes it isn't managed and doesn't
+ // worry about it.
+ Dependencies []string `json:"depends_on"`
+
+ // Primary is the current active instance for this resource.
+ // It can be replaced but only after a successful creation.
+	// This is the instance on which providers will act.
+ Primary *InstanceState `json:"primary"`
+
+ // Deposed is used in the mechanics of CreateBeforeDestroy: the existing
+ // Primary is Deposed to get it out of the way for the replacement Primary to
+ // be created by Apply. If the replacement Primary creates successfully, the
+ // Deposed instance is cleaned up.
+ //
+ // If there were problems creating the replacement Primary, the Deposed
+ // instance and the (now tainted) replacement Primary will be swapped so the
+ // tainted replacement will be cleaned up instead.
+ //
+ // An instance will remain in the Deposed list until it is successfully
+ // destroyed and purged.
+ Deposed []*InstanceState `json:"deposed"`
+
+ // Provider is used when a resource is connected to a provider with an alias.
+ // If this string is empty, the resource is connected to the default provider,
+ // e.g. "aws_instance" goes with the "aws" provider.
+ // If the resource block contained a "provider" key, that value will be set here.
+ Provider string `json:"provider"`
+
+ mu sync.Mutex
+}
+
+func (s *ResourceState) Lock() { s.mu.Lock() }
+func (s *ResourceState) Unlock() { s.mu.Unlock() }
+
+// Equal tests whether two ResourceStates are equal.
+func (s *ResourceState) Equal(other *ResourceState) bool {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Type != other.Type {
+ return false
+ }
+
+ if s.Provider != other.Provider {
+ return false
+ }
+
+ // Dependencies must be equal
+ sort.Strings(s.Dependencies)
+ sort.Strings(other.Dependencies)
+ if len(s.Dependencies) != len(other.Dependencies) {
+ return false
+ }
+ for i, d := range s.Dependencies {
+ if other.Dependencies[i] != d {
+ return false
+ }
+ }
+
+ // States must be equal
+ if !s.Primary.Equal(other.Primary) {
+ return false
+ }
+
+ return true
+}
+
+// Taint marks a resource as tainted.
+func (s *ResourceState) Taint() {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Primary != nil {
+ s.Primary.Tainted = true
+ }
+}
+
+// Untaint unmarks a resource as tainted.
+func (s *ResourceState) Untaint() {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Primary != nil {
+ s.Primary.Tainted = false
+ }
+}
+
+func (s *ResourceState) init() {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Primary == nil {
+ s.Primary = &InstanceState{}
+ }
+ s.Primary.init()
+
+ if s.Dependencies == nil {
+ s.Dependencies = []string{}
+ }
+
+ if s.Deposed == nil {
+ s.Deposed = make([]*InstanceState, 0)
+ }
+}
+
+func (s *ResourceState) deepcopy() *ResourceState {
+ copy, err := copystructure.Config{Lock: true}.Copy(s)
+ if err != nil {
+ panic(err)
+ }
+
+ return copy.(*ResourceState)
+}
+
+// prune is used to remove any instances that are no longer required
+func (s *ResourceState) prune() {
+ s.Lock()
+ defer s.Unlock()
+
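+	// Compact s.Deposed in place: drop nil or empty-ID instances while
+	// preserving order, and nil the vacated tail slot so the backing
+	// array does not retain a reference.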
+ n := len(s.Deposed)
+ for i := 0; i < n; i++ {
+ inst := s.Deposed[i]
+ if inst == nil || inst.ID == "" {
+ copy(s.Deposed[i:], s.Deposed[i+1:])
+ s.Deposed[n-1] = nil
+ n--
+ i--
+ }
+ }
+ s.Deposed = s.Deposed[:n]
+
+ s.Dependencies = uniqueStrings(s.Dependencies)
+}
+
+func (s *ResourceState) sort() {
+ s.Lock()
+ defer s.Unlock()
+
+ sort.Strings(s.Dependencies)
+}
+
+func (s *ResourceState) String() string {
+ s.Lock()
+ defer s.Unlock()
+
+ var buf bytes.Buffer
+ buf.WriteString(fmt.Sprintf("Type = %s", s.Type))
+ return buf.String()
+}
+
+// InstanceState is used to track the unique state information belonging
+// to a given instance.
+type InstanceState struct {
+ // A unique ID for this resource. This is opaque to Terraform
+ // and is only meant as a lookup mechanism for the providers.
+ ID string `json:"id"`
+
+ // Attributes are basic information about the resource. Any keys here
+ // are accessible in variable format within Terraform configurations:
+ // ${resourcetype.name.attribute}.
+ Attributes map[string]string `json:"attributes"`
+
+ // Ephemeral is used to store any state associated with this instance
+ // that is necessary for the Terraform run to complete, but is not
+ // persisted to a state file.
+ Ephemeral EphemeralState `json:"-"`
+
+ // Meta is a simple K/V map that is persisted to the State but otherwise
+ // ignored by Terraform core. It's meant to be used for accounting by
+ // external client code. The value here must only contain Go primitives
+ // and collections.
+ Meta map[string]interface{} `json:"meta"`
+
+ // Tainted is used to mark a resource for recreation.
+ Tainted bool `json:"tainted"`
+
+ mu sync.Mutex
+}
+
+func (s *InstanceState) Lock() { s.mu.Lock() }
+func (s *InstanceState) Unlock() { s.mu.Unlock() }
+
+func (s *InstanceState) init() {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Attributes == nil {
+ s.Attributes = make(map[string]string)
+ }
+ if s.Meta == nil {
+ s.Meta = make(map[string]interface{})
+ }
+ s.Ephemeral.init()
+}
+
+// Set copies all the fields from another InstanceState.
+func (s *InstanceState) Set(from *InstanceState) {
+ s.Lock()
+ defer s.Unlock()
+
+ from.Lock()
+ defer from.Unlock()
+
+ s.ID = from.ID
+ s.Attributes = from.Attributes
+ s.Ephemeral = from.Ephemeral
+ s.Meta = from.Meta
+ s.Tainted = from.Tainted
+}
+
+func (s *InstanceState) DeepCopy() *InstanceState {
+ copy, err := copystructure.Config{Lock: true}.Copy(s)
+ if err != nil {
+ panic(err)
+ }
+
+ return copy.(*InstanceState)
+}
+
+func (s *InstanceState) Empty() bool {
+ if s == nil {
+ return true
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ return s.ID == ""
+}
+
+func (s *InstanceState) Equal(other *InstanceState) bool {
+ // Short circuit some nil checks
+ if s == nil || other == nil {
+ return s == other
+ }
+ s.Lock()
+ defer s.Unlock()
+
+ // IDs must be equal
+ if s.ID != other.ID {
+ return false
+ }
+
+ // Attributes must be equal
+ if len(s.Attributes) != len(other.Attributes) {
+ return false
+ }
+ for k, v := range s.Attributes {
+ otherV, ok := other.Attributes[k]
+ if !ok {
+ return false
+ }
+
+ if v != otherV {
+ return false
+ }
+ }
+
+ // Meta must be equal
+ if len(s.Meta) != len(other.Meta) {
+ return false
+ }
+ if s.Meta != nil && other.Meta != nil {
+ // We only do the deep check if both are non-nil. If one is nil
+ // we treat it as equal since their lengths are both zero (check
+ // above).
+ if !reflect.DeepEqual(s.Meta, other.Meta) {
+ return false
+ }
+ }
+
+ if s.Tainted != other.Tainted {
+ return false
+ }
+
+ return true
+}
+
+// MergeDiff takes a ResourceDiff and merges the attributes into
+// this resource state in order to generate a new state. This new
+// state can be used to provide updated attribute lookups for
+// variable interpolation.
+//
+// If a diff attribute requires computing the value, and hence won't be
+// available until apply, the value is replaced with the unknown value
+// placeholder (config.UnknownVariableValue).
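+//
+// A minimal sketch of the intended use (hypothetical attribute names): given
+// a state whose Attributes contain {"ami": "ami-1"} and a diff marking "ami"
+// as computed, the merged state will contain
+// {"ami": config.UnknownVariableValue}.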
+func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState {
+ result := s.DeepCopy()
+ if result == nil {
+ result = new(InstanceState)
+ }
+ result.init()
+
+ if s != nil {
+ s.Lock()
+ defer s.Unlock()
+ for k, v := range s.Attributes {
+ result.Attributes[k] = v
+ }
+ }
+ if d != nil {
+ for k, diff := range d.CopyAttributes() {
+ if diff.NewRemoved {
+ delete(result.Attributes, k)
+ continue
+ }
+ if diff.NewComputed {
+ result.Attributes[k] = config.UnknownVariableValue
+ continue
+ }
+
+ result.Attributes[k] = diff.New
+ }
+ }
+
+ return result
+}
+
+func (s *InstanceState) String() string {
+ if s == nil {
+ return "<not created>"
+ }
+
+ s.Lock()
+ defer s.Unlock()
+
+ var buf bytes.Buffer
+
+ if s.ID == "" {
+ return "<not created>"
+ }
+
+ buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID))
+
+ attributes := s.Attributes
+ attrKeys := make([]string, 0, len(attributes))
+ for ak := range attributes {
+ if ak == "id" {
+ continue
+ }
+
+ attrKeys = append(attrKeys, ak)
+ }
+ sort.Strings(attrKeys)
+
+ for _, ak := range attrKeys {
+ av := attributes[ak]
+ buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av))
+ }
+
+ buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted))
+
+ return buf.String()
+}
+
+// EphemeralState is used for transient state that is only kept in-memory
+type EphemeralState struct {
+ // ConnInfo is used for the providers to export information which is
+ // used to connect to the resource for provisioning. For example,
+ // this could contain SSH or WinRM credentials.
+ ConnInfo map[string]string `json:"-"`
+
+ // Type is used to specify the resource type for this instance. This is only
+ // required for import operations (as documented). If the documentation
+ // doesn't state that you need to set this, then don't worry about
+ // setting it.
+ Type string `json:"-"`
+}
+
+func (e *EphemeralState) init() {
+ if e.ConnInfo == nil {
+ e.ConnInfo = make(map[string]string)
+ }
+}
+
+func (e *EphemeralState) DeepCopy() *EphemeralState {
+ copy, err := copystructure.Config{Lock: true}.Copy(e)
+ if err != nil {
+ panic(err)
+ }
+
+ return copy.(*EphemeralState)
+}
+
+type jsonStateVersionIdentifier struct {
+ Version int `json:"version"`
+}
+
+// testForV0State checks whether this is a V0 format state: if so, the magic
+// bytes at the start of the file are "tfstate". We no longer support
+// upgrading this type of state, but we return an error message explaining
+// to the user how they can upgrade via the 0.6.x series.
+func testForV0State(buf *bufio.Reader) error {
+ start, err := buf.Peek(len("tfstate"))
+ if err != nil {
+ return fmt.Errorf("Failed to check for magic bytes: %v", err)
+ }
+ if string(start) == "tfstate" {
+ return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" +
+ "format which was used prior to Terraform 0.3. Please upgrade\n" +
+ "this state file using Terraform 0.6.16 prior to using it with\n" +
+ "Terraform 0.7.")
+ }
+
+ return nil
+}
+
+// ErrNoState is returned by ReadState when the io.Reader contains no data
+var ErrNoState = errors.New("no state")
+
+// ReadState reads a state structure out of a reader in the format that
+// was written by WriteState.
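+//
+// Illustrative usage (hypothetical file name; a sketch):
+//
+//     f, err := os.Open("terraform.tfstate")
+//     if err != nil {
+//         return err
+//     }
+//     defer f.Close()
+//     st, err := ReadState(f)
+//     if err == ErrNoState {
+//         // the file was empty
+//     }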
+func ReadState(src io.Reader) (*State, error) {
+ buf := bufio.NewReader(src)
+ if _, err := buf.Peek(1); err != nil {
+ // the error is either io.EOF or "invalid argument", and both are from
+ // an empty state.
+ return nil, ErrNoState
+ }
+
+ if err := testForV0State(buf); err != nil {
+ return nil, err
+ }
+
+ // If this is JSON we buffer the whole thing in memory so we can read it twice.
+ // This is suboptimal, but will work for now.
+ jsonBytes, err := ioutil.ReadAll(buf)
+ if err != nil {
+ return nil, fmt.Errorf("Reading state file failed: %v", err)
+ }
+
+ versionIdentifier := &jsonStateVersionIdentifier{}
+ if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil {
+ return nil, fmt.Errorf("Decoding state file version failed: %v", err)
+ }
+
+ var result *State
+ switch versionIdentifier.Version {
+ case 0:
+ return nil, fmt.Errorf("State version 0 is not supported as JSON.")
+ case 1:
+ v1State, err := ReadStateV1(jsonBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ v2State, err := upgradeStateV1ToV2(v1State)
+ if err != nil {
+ return nil, err
+ }
+
+ v3State, err := upgradeStateV2ToV3(v2State)
+ if err != nil {
+ return nil, err
+ }
+
+ // increment the Serial whenever we upgrade state
+ v3State.Serial++
+ result = v3State
+ case 2:
+ v2State, err := ReadStateV2(jsonBytes)
+ if err != nil {
+ return nil, err
+ }
+ v3State, err := upgradeStateV2ToV3(v2State)
+ if err != nil {
+ return nil, err
+ }
+
+ v3State.Serial++
+ result = v3State
+ case 3:
+ v3State, err := ReadStateV3(jsonBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ result = v3State
+ default:
+ return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+ SemVersion.String(), versionIdentifier.Version)
+ }
+
+ // If we reached this point we must have a result set
+ if result == nil {
+ panic("resulting state in load not set, assertion failed")
+ }
+
+ // Prune the state when reading it. It's possible to write unpruned states or
+ // for a user to make a state unpruned (nil-ing a module state for example).
+ result.prune()
+
+ // Validate the state file is valid
+ if err := result.Validate(); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+func ReadStateV1(jsonBytes []byte) (*stateV1, error) {
+ v1State := &stateV1{}
+ if err := json.Unmarshal(jsonBytes, v1State); err != nil {
+ return nil, fmt.Errorf("Decoding state file failed: %v", err)
+ }
+
+ if v1State.Version != 1 {
+ return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+
+ "read %d, expected 1", v1State.Version)
+ }
+
+ return v1State, nil
+}
+
+func ReadStateV2(jsonBytes []byte) (*State, error) {
+ state := &State{}
+ if err := json.Unmarshal(jsonBytes, state); err != nil {
+ return nil, fmt.Errorf("Decoding state file failed: %v", err)
+ }
+
+ // Check the version, this to ensure we don't read a future
+ // version that we don't understand
+ if state.Version > StateVersion {
+ return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+ SemVersion.String(), state.Version)
+ }
+
+ // Make sure the version is semantic
+ if state.TFVersion != "" {
+ if _, err := version.NewVersion(state.TFVersion); err != nil {
+ return nil, fmt.Errorf(
+ "State contains invalid version: %s\n\n"+
+ "Terraform validates the version format prior to writing it. This\n"+
+ "means that this is invalid of the state becoming corrupted through\n"+
+ "some external means. Please manually modify the Terraform version\n"+
+ "field to be a proper semantic version.",
+ state.TFVersion)
+ }
+ }
+
+ // catch any uninitialized fields in the state
+ state.init()
+
+ // Sort it
+ state.sort()
+
+ return state, nil
+}
+
+func ReadStateV3(jsonBytes []byte) (*State, error) {
+ state := &State{}
+ if err := json.Unmarshal(jsonBytes, state); err != nil {
+ return nil, fmt.Errorf("Decoding state file failed: %v", err)
+ }
+
+ // Check the version, this to ensure we don't read a future
+ // version that we don't understand
+ if state.Version > StateVersion {
+ return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+ SemVersion.String(), state.Version)
+ }
+
+ // Make sure the version is semantic
+ if state.TFVersion != "" {
+ if _, err := version.NewVersion(state.TFVersion); err != nil {
+ return nil, fmt.Errorf(
+ "State contains invalid version: %s\n\n"+
+ "Terraform validates the version format prior to writing it. This\n"+
+ "means that this is invalid of the state becoming corrupted through\n"+
+ "some external means. Please manually modify the Terraform version\n"+
+ "field to be a proper semantic version.",
+ state.TFVersion)
+ }
+ }
+
+ // catch any uninitialized fields in the state
+ state.init()
+
+ // Sort it
+ state.sort()
+
+ // Now we write the state back out to detect any changes in normalization.
+ // If our state is now written out differently, bump the serial number to
+ // prevent conflicts.
+ var buf bytes.Buffer
+ err := WriteState(state, &buf)
+ if err != nil {
+ return nil, err
+ }
+
+ if !bytes.Equal(jsonBytes, buf.Bytes()) {
+ log.Println("[INFO] state modified during read or write. incrementing serial number")
+ state.Serial++
+ }
+
+ return state, nil
+}
+
+// WriteState writes a state to the given writer in the JSON format read by ReadState.
+func WriteState(d *State, dst io.Writer) error {
+ // writing a nil state is a noop.
+ if d == nil {
+ return nil
+ }
+
+ // make sure we have no uninitialized fields
+ d.init()
+
+ // Make sure it is sorted
+ d.sort()
+
+ // Ensure the version is set
+ d.Version = StateVersion
+
+ // If the TFVersion is set, verify it. We used to just set the version
+ // here, but this isn't safe since it changes the MD5 sum on some remote
+ // state storage backends such as Atlas. We now leave it unchanged.
+ if d.TFVersion != "" {
+ if _, err := version.NewVersion(d.TFVersion); err != nil {
+ return fmt.Errorf(
+ "Error writing state, invalid version: %s\n\n"+
+ "The Terraform version when writing the state must be a semantic\n"+
+ "version.",
+ d.TFVersion)
+ }
+ }
+
+ // Encode the data in a human-friendly way
+ data, err := json.MarshalIndent(d, "", " ")
+ if err != nil {
+ return fmt.Errorf("Failed to encode state: %s", err)
+ }
+
+ // We append a newline to the data because MarshalIndent doesn't add one.
+ data = append(data, '\n')
+
+ // Write the data out to the dst
+ if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil {
+ return fmt.Errorf("Failed to write state: %v", err)
+ }
+
+ return nil
+}
+
+// resourceNameSort implements the sort.Interface to sort name parts lexically for
+// strings and numerically for integer indexes.
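+// For example (illustrative names), "aws_instance.foo.2" sorts before
+// "aws_instance.foo.10" because the final parts compare numerically.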
+type resourceNameSort []string
+
+func (r resourceNameSort) Len() int { return len(r) }
+func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
+
+func (r resourceNameSort) Less(i, j int) bool {
+ iParts := strings.Split(r[i], ".")
+ jParts := strings.Split(r[j], ".")
+
+ end := len(iParts)
+ if len(jParts) < end {
+ end = len(jParts)
+ }
+
+ for idx := 0; idx < end; idx++ {
+ if iParts[idx] == jParts[idx] {
+ continue
+ }
+
+ // sort on the first non-matching part
+ iInt, iIntErr := strconv.Atoi(iParts[idx])
+ jInt, jIntErr := strconv.Atoi(jParts[idx])
+
+ switch {
+ case iIntErr == nil && jIntErr == nil:
+ // sort numerically if both parts are integers
+ return iInt < jInt
+ case iIntErr == nil:
+ // numbers sort before strings
+ return true
+ case jIntErr == nil:
+ return false
+ default:
+ return iParts[idx] < jParts[idx]
+ }
+ }
+
+ return r[i] < r[j]
+}
+
+// moduleStateSort implements sort.Interface to sort module states
+type moduleStateSort []*ModuleState
+
+func (s moduleStateSort) Len() int {
+ return len(s)
+}
+
+func (s moduleStateSort) Less(i, j int) bool {
+ a := s[i]
+ b := s[j]
+
+ // If either is nil, then the nil one is "less" than
+ if a == nil || b == nil {
+ return a == nil
+ }
+
+ // If the lengths are different, then the shorter one always wins
+ if len(a.Path) != len(b.Path) {
+ return len(a.Path) < len(b.Path)
+ }
+
+ // Otherwise, compare lexically
+ return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
+}
+
+func (s moduleStateSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+const stateValidateErrMultiModule = `
+Multiple modules with the same path: %s
+
+This means that there are multiple entries in the "modules" field
+in your state file that point to the same module. This will cause Terraform
+to behave in unexpected and error-prone ways and is invalid. Please back up
+and modify your state file manually to resolve this.
+`
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_add.go b/vendor/github.com/hashicorp/terraform/terraform/state_add.go
new file mode 100644
index 00000000..11637303
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_add.go
@@ -0,0 +1,374 @@
+package terraform
+
+import "fmt"
+
+// Add adds the item to the state at the given address.
+//
+// The item can be a ModuleState, ResourceState, or InstanceState. Depending
+// on the item type, the address may or may not be valid. For example, a
+// module cannot be moved to a resource address; a resource, however, can be
+// moved to a module address (it retains the same name under the new module).
+//
+// The item can also be a []*ModuleState, which is the case for nested
+// modules. In this case, Add will expect the zero-index to be the top-most
+// module to add and will only nest children from there. For semantics, this
+// is equivalent to module => module.
+//
+// The full semantics of Add:
+//
+// ┌───────────────────┬───────────────────┬───────────────────┐
+// │ Module Address │ Resource Address │ Instance Address │
+// ┌─────────────────┼───────────────────┼───────────────────┼───────────────────┤
+// │ ModuleState │ ✓ │ x │ x │
+// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
+// │ ResourceState │ ✓ │ ✓ │ maybe* │
+// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
+// │ Instance State │ ✓ │ ✓ │ ✓ │
+// └─────────────────┴───────────────────┴───────────────────┴───────────────────┘
+//
+// *maybe - Resources can be added at an instance address only if the resource
+// represents a single instance (primary). Example:
+// "aws_instance.foo" can be moved to "aws_instance.bar.tainted"
+//
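+// Illustrative call (hypothetical addresses; a sketch):
+//
+//     rs := mod.Resources["aws_instance.foo"]
+//     err := s.Add("aws_instance.foo", "module.web.aws_instance.foo", rs)
+//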
+func (s *State) Add(fromAddrRaw string, toAddrRaw string, raw interface{}) error {
+ // Parse the to address
+ toAddr, err := ParseResourceAddress(toAddrRaw)
+ if err != nil {
+ return err
+ }
+
+ // Parse the from address
+ fromAddr, err := ParseResourceAddress(fromAddrRaw)
+ if err != nil {
+ return err
+ }
+
+ // Determine the types
+ from := detectValueAddLoc(raw)
+ to := detectAddrAddLoc(toAddr)
+
+ // Find the function to do this
+ fromMap, ok := stateAddFuncs[from]
+ if !ok {
+ return fmt.Errorf("invalid source to add to state: %T", raw)
+ }
+ f, ok := fromMap[to]
+ if !ok {
+ return fmt.Errorf("invalid destination: %s (%d)", toAddr, to)
+ }
+
+ // Call the migrator
+ if err := f(s, fromAddr, toAddr, raw); err != nil {
+ return err
+ }
+
+ // Prune the state
+ s.prune()
+ return nil
+}
+
+func stateAddFunc_Module_Module(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
+ // raw can be either *ModuleState or []*ModuleState. The former means
+ // we're moving just one module. The latter means we're moving a module
+ // and children.
+ root := raw
+ var rest []*ModuleState
+ if list, ok := raw.([]*ModuleState); ok {
+ // We need at least one item
+ if len(list) == 0 {
+ return fmt.Errorf("module move with no value to: %s", addr)
+ }
+
+ // The first item is always the root
+ root = list[0]
+ if len(list) > 1 {
+ rest = list[1:]
+ }
+ }
+
+ // Get the actual module state
+ src := root.(*ModuleState).deepcopy()
+
+ // If the target module exists, it is an error
+ path := append([]string{"root"}, addr.Path...)
+ if s.ModuleByPath(path) != nil {
+ return fmt.Errorf("module target is not empty: %s", addr)
+ }
+
+ // Create it and copy our outputs and dependencies
+ mod := s.AddModule(path)
+ mod.Outputs = src.Outputs
+ mod.Dependencies = src.Dependencies
+
+ // Go through the resources and perform an add for each of them
+ for k, v := range src.Resources {
+ resourceKey, err := ParseResourceStateKey(k)
+ if err != nil {
+ return err
+ }
+
+ // Update the resource address for this
+ addrCopy := *addr
+ addrCopy.Type = resourceKey.Type
+ addrCopy.Name = resourceKey.Name
+ addrCopy.Index = resourceKey.Index
+ addrCopy.Mode = resourceKey.Mode
+
+ // Perform an add
+ if err := s.Add(fromAddr.String(), addrCopy.String(), v); err != nil {
+ return err
+ }
+ }
+
+ // Add all the children if we have them
+ for _, item := range rest {
+ // If item isn't a descendent of our root, then ignore it
+ if !src.IsDescendent(item) {
+ continue
+ }
+
+ // It is! Strip the leading prefix and attach that to our address
+ extra := item.Path[len(src.Path):]
+ addrCopy := addr.Copy()
+ addrCopy.Path = append(addrCopy.Path, extra...)
+
+ // Add it
+ if err := s.Add(fromAddr.String(), addrCopy.String(), item); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func stateAddFunc_Resource_Module(
+ s *State, from, to *ResourceAddress, raw interface{}) error {
+ // Build the more specific "to" address
+ addr := *to
+ addr.Type = from.Type
+ addr.Name = from.Name
+
+ return s.Add(from.String(), addr.String(), raw)
+}
+
+func stateAddFunc_Resource_Resource(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
+ // raw can be either *ResourceState or []*ResourceState. The former means
+ // we're moving just one resource. The latter means we're moving a count
+ // of resources.
+ if list, ok := raw.([]*ResourceState); ok {
+ // We need at least one item
+ if len(list) == 0 {
+ return fmt.Errorf("resource move with no value to: %s", addr)
+ }
+
+ // If there is an index, this is an error since we can't assign
+ // a set of resources to a single index
+ if addr.Index >= 0 && len(list) > 1 {
+ return fmt.Errorf(
+ "multiple resources can't be moved to a single index: "+
+ "%s => %s", fromAddr, addr)
+ }
+
+ // Add each with a specific index
+ for i, rs := range list {
+ addrCopy := addr.Copy()
+ addrCopy.Index = i
+
+ if err := s.Add(fromAddr.String(), addrCopy.String(), rs); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+
+ src := raw.(*ResourceState).deepcopy()
+
+ // Initialize the resource
+ resourceRaw, exists := stateAddInitAddr(s, addr)
+ if exists {
+ return fmt.Errorf("resource exists and not empty: %s", addr)
+ }
+ resource := resourceRaw.(*ResourceState)
+ resource.Type = src.Type
+ resource.Dependencies = src.Dependencies
+ resource.Provider = src.Provider
+
+ // Move the primary
+ if src.Primary != nil {
+ addrCopy := *addr
+ addrCopy.InstanceType = TypePrimary
+ addrCopy.InstanceTypeSet = true
+ if err := s.Add(fromAddr.String(), addrCopy.String(), src.Primary); err != nil {
+ return err
+ }
+ }
+
+ // Move all deposed
+ if len(src.Deposed) > 0 {
+ resource.Deposed = src.Deposed
+ }
+
+ return nil
+}
+
+func stateAddFunc_Instance_Instance(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
+ src := raw.(*InstanceState).DeepCopy()
+
+ // Create the instance
+ instanceRaw, _ := stateAddInitAddr(s, addr)
+ instance := instanceRaw.(*InstanceState)
+
+ // Set it
+ instance.Set(src)
+
+ return nil
+}
+
+func stateAddFunc_Instance_Module(
+ s *State, from, to *ResourceAddress, raw interface{}) error {
+ addr := *to
+ addr.Type = from.Type
+ addr.Name = from.Name
+
+ return s.Add(from.String(), addr.String(), raw)
+}
+
+func stateAddFunc_Instance_Resource(
+ s *State, from, to *ResourceAddress, raw interface{}) error {
+ addr := *to
+ addr.InstanceType = TypePrimary
+ addr.InstanceTypeSet = true
+
+ return s.Add(from.String(), addr.String(), raw)
+}
+
+// stateAddFunc is the type of function for adding an item to a state
+type stateAddFunc func(s *State, from, to *ResourceAddress, item interface{}) error
+
+// stateAddFuncs has the full matrix mapping of the state adders.
+var stateAddFuncs map[stateAddLoc]map[stateAddLoc]stateAddFunc
+
+func init() {
+ stateAddFuncs = map[stateAddLoc]map[stateAddLoc]stateAddFunc{
+ stateAddModule: {
+ stateAddModule: stateAddFunc_Module_Module,
+ },
+ stateAddResource: {
+ stateAddModule: stateAddFunc_Resource_Module,
+ stateAddResource: stateAddFunc_Resource_Resource,
+ },
+ stateAddInstance: {
+ stateAddInstance: stateAddFunc_Instance_Instance,
+ stateAddModule: stateAddFunc_Instance_Module,
+ stateAddResource: stateAddFunc_Instance_Resource,
+ },
+ }
+}
+
+// stateAddLoc is an enum to represent the location where state is being
+// moved from/to. We use this for quick lookups in a function map.
+type stateAddLoc uint
+
+const (
+ stateAddInvalid stateAddLoc = iota
+ stateAddModule
+ stateAddResource
+ stateAddInstance
+)
+
+// detectAddrAddLoc detects the state type for the given address. This
+// function is specifically not unit tested since we consider the State.Add
+// tests to be comprehensive enough to cover it.
+func detectAddrAddLoc(addr *ResourceAddress) stateAddLoc {
+ if addr.Name == "" {
+ return stateAddModule
+ }
+
+ if !addr.InstanceTypeSet {
+ return stateAddResource
+ }
+
+ return stateAddInstance
+}
+
+// detectValueAddLoc determines the stateAddLoc value from the raw value
+// that is some State structure.
+func detectValueAddLoc(raw interface{}) stateAddLoc {
+ switch raw.(type) {
+ case *ModuleState:
+ return stateAddModule
+ case []*ModuleState:
+ return stateAddModule
+ case *ResourceState:
+ return stateAddResource
+ case []*ResourceState:
+ return stateAddResource
+ case *InstanceState:
+ return stateAddInstance
+ default:
+ return stateAddInvalid
+ }
+}
+
+// stateAddInitAddr takes a ResourceAddress and creates any missing state
+// containers up to that point, returning the empty (or existing) value
+// at that address.
+func stateAddInitAddr(s *State, addr *ResourceAddress) (interface{}, bool) {
+ addType := detectAddrAddLoc(addr)
+
+ // Get the module
+ path := append([]string{"root"}, addr.Path...)
+ exists := true
+ mod := s.ModuleByPath(path)
+ if mod == nil {
+ mod = s.AddModule(path)
+ exists = false
+ }
+ if addType == stateAddModule {
+ return mod, exists
+ }
+
+ // Add the resource
+ resourceKey := (&ResourceStateKey{
+ Name: addr.Name,
+ Type: addr.Type,
+ Index: addr.Index,
+ Mode: addr.Mode,
+ }).String()
+ exists = true
+ resource, ok := mod.Resources[resourceKey]
+ if !ok {
+ resource = &ResourceState{Type: addr.Type}
+ resource.init()
+ mod.Resources[resourceKey] = resource
+ exists = false
+ }
+ if addType == stateAddResource {
+ return resource, exists
+ }
+
+ // Get the instance
+ exists = true
+ instance := &InstanceState{}
+ switch addr.InstanceType {
+ case TypePrimary, TypeTainted:
+ if v := resource.Primary; v != nil {
+ instance = resource.Primary
+ } else {
+ exists = false
+ }
+ case TypeDeposed:
+ idx := addr.Index
+ if addr.Index < 0 {
+ idx = 0
+ }
+ if len(resource.Deposed) > idx {
+ instance = resource.Deposed[idx]
+ } else {
+ resource.Deposed = append(resource.Deposed, instance)
+ exists = false
+ }
+ }
+
+ return instance, exists
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go
new file mode 100644
index 00000000..2dcb11b7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go
@@ -0,0 +1,267 @@
+package terraform
+
+import (
+ "fmt"
+ "sort"
+)
+
+// StateFilter is responsible for filtering and searching a state.
+//
+// This is a separate struct from State rather than a method on State
+// because StateFilter might create sidecar data structures to optimize
+// filtering on the state.
+//
+// If you change the State, the filter created is invalid and a new one
+// should be allocated. StateFilter will not watch State for changes and do
+// this for you. If you filter after changing the State without allocating
+// a new filter, the behavior is not defined.
+type StateFilter struct {
+ State *State
+}
+
+// Filter takes the addresses specified by fs and finds all the matches.
+// The values of fs are resource addressing syntax that can be parsed by
+// ParseResourceAddress.
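+//
+// Illustrative usage (hypothetical address; a sketch):
+//
+//     filter := &StateFilter{State: s}
+//     results, err := filter.Filter("module.web.aws_instance.app")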
+func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) {
+ // Parse all the addresses
+ as := make([]*ResourceAddress, len(fs))
+ for i, v := range fs {
+ a, err := ParseResourceAddress(v)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing address '%s': %s", v, err)
+ }
+
+ as[i] = a
+ }
+
+ // If we weren't given any filters, then we list all
+ if len(fs) == 0 {
+ as = append(as, &ResourceAddress{Index: -1})
+ }
+
+ // Filter each of the addresses. We keep track of the results in a map to
+ // strip duplicates.
+ resultSet := make(map[string]*StateFilterResult)
+ for _, a := range as {
+ for _, r := range f.filterSingle(a) {
+ resultSet[r.String()] = r
+ }
+ }
+
+ // Make the result list
+ results := make([]*StateFilterResult, 0, len(resultSet))
+ for _, v := range resultSet {
+ results = append(results, v)
+ }
+
+ // Sort them and return
+ sort.Sort(StateFilterResultSlice(results))
+ return results, nil
+}
+
+func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult {
+ // The slice to keep track of results
+ var results []*StateFilterResult
+
+ // Go through modules first.
+ modules := make([]*ModuleState, 0, len(f.State.Modules))
+ for _, m := range f.State.Modules {
+ if f.relevant(a, m) {
+ modules = append(modules, m)
+
+ // Only add the module to the results if we haven't specified a type.
+ // We also ignore the root module.
+ if a.Type == "" && len(m.Path) > 1 {
+ results = append(results, &StateFilterResult{
+ Path: m.Path[1:],
+ Address: (&ResourceAddress{Path: m.Path[1:]}).String(),
+ Value: m,
+ })
+ }
+ }
+ }
+
+ // With the modules set, go through all the resources within
+ // the modules to find relevant resources.
+ for _, m := range modules {
+ for n, r := range m.Resources {
+ // The name in the state contains valuable information. Parse.
+ key, err := ParseResourceStateKey(n)
+ if err != nil {
+ // If we get an error parsing, then just leave this
+ // entry out of the results.
+ continue
+ }
+
+ // Older states and test fixtures often don't contain the
+ // type directly on the ResourceState. We add this so StateFilter
+ // is a bit more robust.
+ if r.Type == "" {
+ r.Type = key.Type
+ }
+
+ if f.relevant(a, r) {
+ if a.Name != "" && a.Name != key.Name {
+ // Name doesn't match
+ continue
+ }
+
+ if a.Index >= 0 && key.Index != a.Index {
+ // Index doesn't match
+ continue
+ }
+
+ // Build the address for this resource
+ addr := &ResourceAddress{
+ Path: m.Path[1:],
+ Name: key.Name,
+ Type: key.Type,
+ Index: key.Index,
+ }
+
+ // Add the resource level result
+ resourceResult := &StateFilterResult{
+ Path: addr.Path,
+ Address: addr.String(),
+ Value: r,
+ }
+ if !a.InstanceTypeSet {
+ results = append(results, resourceResult)
+ }
+
+ // Add the instances
+ if r.Primary != nil {
+ addr.InstanceType = TypePrimary
+ addr.InstanceTypeSet = false
+ results = append(results, &StateFilterResult{
+ Path: addr.Path,
+ Address: addr.String(),
+ Parent: resourceResult,
+ Value: r.Primary,
+ })
+ }
+
+ for _, instance := range r.Deposed {
+ if f.relevant(a, instance) {
+ addr.InstanceType = TypeDeposed
+ addr.InstanceTypeSet = true
+ results = append(results, &StateFilterResult{
+ Path: addr.Path,
+ Address: addr.String(),
+ Parent: resourceResult,
+ Value: instance,
+ })
+ }
+ }
+ }
+ }
+ }
+
+ return results
+}
+
+// relevant checks for relevance of this address against the given value.
+func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool {
+ switch v := raw.(type) {
+ case *ModuleState:
+ path := v.Path[1:]
+
+ if len(addr.Path) > len(path) {
+ // Longer path in address means there is no way we match.
+ return false
+ }
+
+ // Check for a prefix match
+ for i, p := range addr.Path {
+ if path[i] != p {
+ // Any mismatches don't match.
+ return false
+ }
+ }
+
+ return true
+ case *ResourceState:
+ if addr.Type == "" {
+ // If we have no resource type, then we're interested in all!
+ return true
+ }
+
+ // If the type doesn't match we fail immediately
+ if v.Type != addr.Type {
+ return false
+ }
+
+ return true
+ default:
+ // If we don't know about it, let's just say no
+ return false
+ }
+}
+
+// StateFilterResult is a single result from a filter operation. Filter
+// can match multiple things within a state (module, resource, instance, etc.)
+// and this unifies that.
+type StateFilterResult struct {
+ // Module path of the result
+ Path []string
+
+ // Address is the address that can be used to reference this exact result.
+ Address string
+
+ // Parent, if non-nil, is a parent of this result. For instances, the
+ // parent would be a resource. For resources, the parent would be
+ // a module. For modules, this is currently nil.
+ Parent *StateFilterResult
+
+ // Value is the actual value. This must be type switched on. It can be
+ // any data structures that `State` can hold: `ModuleState`,
+ // `ResourceState`, `InstanceState`.
+ Value interface{}
+}
+
+func (r *StateFilterResult) String() string {
+ return fmt.Sprintf("%T: %s", r.Value, r.Address)
+}
+
+func (r *StateFilterResult) sortedType() int {
+ switch r.Value.(type) {
+ case *ModuleState:
+ return 0
+ case *ResourceState:
+ return 1
+ case *InstanceState:
+ return 2
+ default:
+ return 50
+ }
+}
+
+// StateFilterResultSlice is a slice of results that implements
+// sort.Interface. The sorting goal is what is most appealing to
+// human output.
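+//
+// For example (illustrative addresses), "aws_instance.foo[2]" sorts before
+// "aws_instance.foo[10]" because equal names fall back to numeric index order.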
+type StateFilterResultSlice []*StateFilterResult
+
+func (s StateFilterResultSlice) Len() int { return len(s) }
+func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s StateFilterResultSlice) Less(i, j int) bool {
+ a, b := s[i], s[j]
+
+ // if these addresses contain an index, we want to sort by index rather than name
+ addrA, errA := ParseResourceAddress(a.Address)
+ addrB, errB := ParseResourceAddress(b.Address)
+ if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index {
+ return addrA.Index < addrB.Index
+ }
+
+ // If the addresses are different it is just lexicographic sorting
+ if a.Address != b.Address {
+ return a.Address < b.Address
+ }
+
+ // Addresses are the same, so fall back to sorting by type
+ return a.sortedType() < b.sortedType()
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
new file mode 100644
index 00000000..aa13cce8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
@@ -0,0 +1,189 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/mitchellh/copystructure"
+)
+
+// upgradeStateV1ToV2 is used to upgrade a V1 state representation
+// into a V2 state representation
+func upgradeStateV1ToV2(old *stateV1) (*State, error) {
+ if old == nil {
+ return nil, nil
+ }
+
+ remote, err := old.Remote.upgradeToV2()
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading State V1: %v", err)
+ }
+
+ modules := make([]*ModuleState, len(old.Modules))
+ for i, module := range old.Modules {
+ upgraded, err := module.upgradeToV2()
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading State V1: %v", err)
+ }
+ modules[i] = upgraded
+ }
+ if len(modules) == 0 {
+ modules = nil
+ }
+
+ newState := &State{
+ Version: 2,
+ Serial: old.Serial,
+ Remote: remote,
+ Modules: modules,
+ }
+
+ newState.sort()
+ newState.init()
+
+ return newState, nil
+}
+
+func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) {
+ if old == nil {
+ return nil, nil
+ }
+
+ config, err := copystructure.Copy(old.Config)
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err)
+ }
+
+ return &RemoteState{
+ Type: old.Type,
+ Config: config.(map[string]string),
+ }, nil
+}
+
+func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) {
+ if old == nil {
+ return nil, nil
+ }
+
+ pathRaw, err := copystructure.Copy(old.Path)
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
+ }
+ path, ok := pathRaw.([]string)
+ if !ok {
+ return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings")
+ }
+ if len(path) == 0 {
+ // We found some V1 states with a nil path. Assume root and catch
+ // duplicate path errors later (as part of Validate).
+ path = rootModulePath
+ }
+
+ // Outputs needs upgrading to use the new structure
+ outputs := make(map[string]*OutputState)
+ for key, output := range old.Outputs {
+ outputs[key] = &OutputState{
+ Type: "string",
+ Value: output,
+ Sensitive: false,
+ }
+ }
+
+ resources := make(map[string]*ResourceState)
+ for key, oldResource := range old.Resources {
+ upgraded, err := oldResource.upgradeToV2()
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
+ }
+ resources[key] = upgraded
+ }
+
+ dependencies, err := copystructure.Copy(old.Dependencies)
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
+ }
+
+ return &ModuleState{
+ Path: path,
+ Outputs: outputs,
+ Resources: resources,
+ Dependencies: dependencies.([]string),
+ }, nil
+}
+
+func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) {
+ if old == nil {
+ return nil, nil
+ }
+
+ dependencies, err := copystructure.Copy(old.Dependencies)
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
+ }
+
+ primary, err := old.Primary.upgradeToV2()
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
+ }
+
+ deposed := make([]*InstanceState, len(old.Deposed))
+ for i, v := range old.Deposed {
+ upgraded, err := v.upgradeToV2()
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
+ }
+ deposed[i] = upgraded
+ }
+ if len(deposed) == 0 {
+ deposed = nil
+ }
+
+ return &ResourceState{
+ Type: old.Type,
+ Dependencies: dependencies.([]string),
+ Primary: primary,
+ Deposed: deposed,
+ Provider: old.Provider,
+ }, nil
+}
+
+func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) {
+ if old == nil {
+ return nil, nil
+ }
+
+ attributes, err := copystructure.Copy(old.Attributes)
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
+ }
+ ephemeral, err := old.Ephemeral.upgradeToV2()
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
+ }
+
+ meta, err := copystructure.Copy(old.Meta)
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
+ }
+
+ newMeta := make(map[string]interface{})
+ for k, v := range meta.(map[string]string) {
+ newMeta[k] = v
+ }
+
+ return &InstanceState{
+ ID: old.ID,
+ Attributes: attributes.(map[string]string),
+ Ephemeral: *ephemeral,
+ Meta: newMeta,
+ }, nil
+}
+
+func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) {
+ connInfo, err := copystructure.Copy(old.ConnInfo)
+ if err != nil {
+ return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err)
+ }
+ return &EphemeralState{
+ ConnInfo: connInfo.(map[string]string),
+ }, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go
new file mode 100644
index 00000000..e52d35fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go
@@ -0,0 +1,142 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// upgradeStateV2ToV3 upgrades a V2 state to V3. The upgrade does not affect
+// the structure, so we do not need to redeclare all of the structs involved;
+// we just take a deep copy of the old structure and assert the version
+// number is as we expect.
+func upgradeStateV2ToV3(old *State) (*State, error) {
+ new := old.DeepCopy()
+
+ // Ensure the copied version is v2 before attempting to upgrade
+ if new.Version != 2 {
+ return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " +
+ "a state which is not version 2.")
+ }
+
+ // Set the new version number
+ new.Version = 3
+
+ // Change the counts for things which look like maps to use the %
+ // syntax. Remove counts for empty collections - they will be added
+ // back in later.
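+ //
+ // For example (illustrative attribute names): a map attribute stored in
+ // V2 state as tags.# = "2" is rewritten to tags.% = "2", while the count
+ // key of an empty collection is dropped entirely.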
+ for _, module := range new.Modules {
+ for _, resource := range module.Resources {
+ // Upgrade Primary
+ if resource.Primary != nil {
+ upgradeAttributesV2ToV3(resource.Primary)
+ }
+
+ // Upgrade Deposed
+ if resource.Deposed != nil {
+ for _, deposed := range resource.Deposed {
+ upgradeAttributesV2ToV3(deposed)
+ }
+ }
+ }
+ }
+
+ return new, nil
+}
+
+func upgradeAttributesV2ToV3(instanceState *InstanceState) error {
+ collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`)
+ collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`)
+
+ // Identify the key prefix of anything which is a collection
+ var collectionKeyPrefixes []string
+ for key := range instanceState.Attributes {
+ if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
+ collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1])
+ }
+ }
+ sort.Strings(collectionKeyPrefixes)
+
+ log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes)
+
+ // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not
+ // run very often.
+ for _, prefix := range collectionKeyPrefixes {
+ // First get the actual keys that belong to this prefix
+ var potentialKeysMatching []string
+ for key := range instanceState.Attributes {
+ if strings.HasPrefix(key, prefix) {
+ potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix))
+ }
+ }
+ sort.Strings(potentialKeysMatching)
+
+ var actualKeysMatching []string
+ for _, key := range potentialKeysMatching {
+ if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
+ actualKeysMatching = append(actualKeysMatching, submatches[0][1])
+ } else {
+ if key != "#" {
+ actualKeysMatching = append(actualKeysMatching, key)
+ }
+ }
+ }
+ actualKeysMatching = uniqueSortedStrings(actualKeysMatching)
+
+ // Now inspect the keys in order to determine whether this is most likely to be
+ // a map, list or set. There is room for error here, so we log in each case. If
+ // there is no method of telling, we remove the key from the InstanceState in
+ // order that it will be recreated. Again, this could be rolled into fewer loops
+ // but we prefer clarity.
+
+ oldCountKey := fmt.Sprintf("%s#", prefix)
+
+ // First, detect "obvious" maps - which have non-numeric keys (mostly).
+ hasNonNumericKeys := false
+ for _, key := range actualKeysMatching {
+ if _, err := strconv.Atoi(key); err != nil {
+ hasNonNumericKeys = true
+ }
+ }
+ if hasNonNumericKeys {
+ newCountKey := fmt.Sprintf("%s%%", prefix)
+
+ instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey]
+ delete(instanceState.Attributes, oldCountKey)
+ log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s",
+ strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey])
+ }
+
+ // Now detect empty collections and remove them from state.
+ if len(actualKeysMatching) == 0 {
+ delete(instanceState.Attributes, oldCountKey)
+ log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.",
+ strings.TrimSuffix(prefix, "."))
+ }
+ }
+
+ return nil
+}
+
+// uniqueSortedStrings removes duplicates from a slice of strings and returns
+// a sorted slice of the unique strings.
+func uniqueSortedStrings(input []string) []string {
+ uniquemap := make(map[string]struct{})
+ for _, str := range input {
+ uniquemap[str] = struct{}{}
+ }
+
+ output := make([]string, len(uniquemap))
+
+ i := 0
+ for key := range uniquemap {
+ output[i] = key
+ i++
+ }
+
+ sort.Strings(output)
+ return output
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_v1.go b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
new file mode 100644
index 00000000..68cffb41
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
@@ -0,0 +1,145 @@
+package terraform
+
+// stateV1 keeps track of a snapshot state-of-the-world that Terraform
+// can use to keep track of what real world resources it is actually
+// managing.
+//
+// stateV1 is kept only for backwards compatibility and is no longer
+// written by current versions of Terraform.
+//
+// For the upgrade process, see state_upgrade_v1_to_v2.go
+type stateV1 struct {
+ // Version is the protocol version. "1" for a StateV1.
+ Version int `json:"version"`
+
+ // Serial is incremented on any operation that modifies
+ // the State file. It is used to detect potentially conflicting
+ // updates.
+ Serial int64 `json:"serial"`
+
+ // Remote is used to track the metadata required to
+ // pull and push state files from a remote storage endpoint.
+ Remote *remoteStateV1 `json:"remote,omitempty"`
+
+ // Modules contains all the modules in a breadth-first order
+ Modules []*moduleStateV1 `json:"modules"`
+}
+
+type remoteStateV1 struct {
+ // Type controls the client we use for the remote state
+ Type string `json:"type"`
+
+ // Config is used to store arbitrary configuration that
+ // is type specific
+ Config map[string]string `json:"config"`
+}
+
+type moduleStateV1 struct {
+ // Path is the import path from the root module. Module imports are
+ // always disjoint, so the path represents a module tree.
+ Path []string `json:"path"`
+
+ // Outputs declared by the module and maintained for each module
+ // even though only the root module technically needs to be kept.
+ // This allows operators to inspect values at the boundaries.
+ Outputs map[string]string `json:"outputs"`
+
+ // Resources is a mapping of the logically named resource to
+ // the state of the resource. Each resource may actually have
+ // N instances underneath, although a user only needs to think
+ // about the 1:1 case.
+ Resources map[string]*resourceStateV1 `json:"resources"`
+
+ // Dependencies are a list of things that this module relies on
+ // existing to remain intact. For example: a module may depend
+ // on a VPC ID given by an aws_vpc resource.
+ //
+ // Terraform uses this information to build valid destruction
+ // orders and to warn the user if they're destroying a module that
+ // another resource depends on.
+ //
+ // Things can be put into this list that may not be managed by
+ // Terraform. If Terraform doesn't find a matching ID in the
+ // overall state, then it assumes it isn't managed and doesn't
+ // worry about it.
+ Dependencies []string `json:"depends_on,omitempty"`
+}
+
+type resourceStateV1 struct {
+ // This is filled in and managed by Terraform, and is the resource
+ // type itself such as "mycloud_instance". If a resource provider sets
+ // this value, it won't be persisted.
+ Type string `json:"type"`
+
+ // Dependencies are a list of things that this resource relies on
+ // existing to remain intact. For example: an AWS instance might
+ // depend on a subnet (which itself might depend on a VPC, and so
+ // on).
+ //
+ // Terraform uses this information to build valid destruction
+ // orders and to warn the user if they're destroying a resource that
+ // another resource depends on.
+ //
+ // Things can be put into this list that may not be managed by
+ // Terraform. If Terraform doesn't find a matching ID in the
+ // overall state, then it assumes it isn't managed and doesn't
+ // worry about it.
+ Dependencies []string `json:"depends_on,omitempty"`
+
+ // Primary is the current active instance for this resource.
+ // It can be replaced but only after a successful creation.
+ // This is the instances on which providers will act.
+ Primary *instanceStateV1 `json:"primary"`
+
+ // Tainted is used to track any underlying instances that
+ // have been created but are in a bad or unknown state and
+ // need to be cleaned up subsequently. In the
+ // standard case, there is at most a single instance.
+ // However, in pathological cases, it is possible for the number
+ // of instances to accumulate.
+ Tainted []*instanceStateV1 `json:"tainted,omitempty"`
+
+ // Deposed is used in the mechanics of CreateBeforeDestroy: the existing
+ // Primary is Deposed to get it out of the way for the replacement Primary to
+ // be created by Apply. If the replacement Primary creates successfully, the
+ // Deposed instance is cleaned up. If there were problems creating the
+ // replacement, the instance remains in the Deposed list so it can be
+ // destroyed in a future run. Functionally, Deposed instances are very
+ // similar to Tainted instances in that Terraform is only tracking them in
+ // order to remember to destroy them.
+ Deposed []*instanceStateV1 `json:"deposed,omitempty"`
+
+ // Provider is used when a resource is connected to a provider with an alias.
+ // If this string is empty, the resource is connected to the default provider,
+ // e.g. "aws_instance" goes with the "aws" provider.
+ // If the resource block contained a "provider" key, that value will be set here.
+ Provider string `json:"provider,omitempty"`
+}
+
+type instanceStateV1 struct {
+ // A unique ID for this resource. This is opaque to Terraform
+ // and is only meant as a lookup mechanism for the providers.
+ ID string `json:"id"`
+
+ // Attributes are basic information about the resource. Any keys here
+ // are accessible in variable format within Terraform configurations:
+ // ${resourcetype.name.attribute}.
+ Attributes map[string]string `json:"attributes,omitempty"`
+
+ // Ephemeral is used to store any state associated with this instance
+ // that is necessary for the Terraform run to complete, but is not
+ // persisted to a state file.
+ Ephemeral ephemeralStateV1 `json:"-"`
+
+ // Meta is a simple K/V map that is persisted to the State but otherwise
+ // ignored by Terraform core. It's meant to be used for accounting by
+ // external client code.
+ Meta map[string]string `json:"meta,omitempty"`
+}
+
+type ephemeralStateV1 struct {
+ // ConnInfo is used for the providers to export information which is
+ // used to connect to the resource for provisioning. For example,
+ // this could contain SSH or WinRM credentials.
+ ConnInfo map[string]string `json:"-"`
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/testing.go b/vendor/github.com/hashicorp/terraform/terraform/testing.go
new file mode 100644
index 00000000..3f0418d9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/testing.go
@@ -0,0 +1,19 @@
+package terraform
+
+import (
+ "os"
+ "testing"
+)
+
+// TestStateFile writes the given state to the path.
+func TestStateFile(t *testing.T, path string, state *State) {
+ f, err := os.Create(path)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ defer f.Close()
+
+ if err := WriteState(state, f); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go
new file mode 100644
index 00000000..f4a431a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform.go
@@ -0,0 +1,52 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphTransformer is the interface that transformers implement. This
+// interface is only for transforms that need entire graph visibility.
+type GraphTransformer interface {
+ Transform(*Graph) error
+}
+
+// GraphVertexTransformer is an interface that transforms a single
+// Vertex within the graph. This is a specialization of GraphTransformer
+// that makes it easy to do vertex replacement.
+//
+// The GraphTransformer that runs through the GraphVertexTransformers is
+// VertexTransformer.
+type GraphVertexTransformer interface {
+ Transform(dag.Vertex) (dag.Vertex, error)
+}
+
+// GraphTransformIf is a helper function that conditionally returns the
+// given GraphTransformer, or nil if the condition is false. This is useful
+// for assembling a sequence of transforms inline without having to split it
+// up into multiple append() calls.
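+//
+// For example (hypothetical condition and config values; a sketch):
+//
+//     GraphTransformIf(func() bool { return mod != nil },
+//         &AttachResourceConfigTransformer{Module: mod})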
+func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer {
+ if f() {
+ return then
+ }
+
+ return nil
+}
+
+type graphTransformerMulti struct {
+ Transforms []GraphTransformer
+}
+
+func (t *graphTransformerMulti) Transform(g *Graph) error {
+ for _, t := range t.Transforms {
+ if err := t.Transform(g); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// GraphTransformMulti combines multiple graph transformers into a single
+// GraphTransformer that runs all the individual graph transformers.
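+//
+// Illustrative composition (a sketch):
+//
+//     t := GraphTransformMulti(
+//         &AttachStateTransformer{State: st},
+//         &AttachResourceConfigTransformer{Module: mod},
+//     )
+//     err := t.Transform(g)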
+func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer {
+ return &graphTransformerMulti{Transforms: ts}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
new file mode 100644
index 00000000..10506ea0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
@@ -0,0 +1,80 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// GraphNodeAttachProvider is an interface that must be implemented by nodes
+// that want provider configurations attached.
+type GraphNodeAttachProvider interface {
+ // Must be implemented to determine the path for the configuration
+ GraphNodeSubPath
+
+ // ProviderName with no module prefix. Example: "aws".
+ ProviderName() string
+
+ // Sets the configuration
+ AttachProvider(*config.ProviderConfig)
+}
+
+// AttachProviderConfigTransformer goes through the graph and attaches
+// provider configuration structures to nodes that implement the interfaces
+// above.
+//
+// The attached configuration structures are directly from the configuration.
+// If they're going to be modified, a copy should be made.
+type AttachProviderConfigTransformer struct {
+ Module *module.Tree // Module is the root module for the config
+}
+
+func (t *AttachProviderConfigTransformer) Transform(g *Graph) error {
+ if err := t.attachProviders(g); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (t *AttachProviderConfigTransformer) attachProviders(g *Graph) error {
+ // Go through and find GraphNodeAttachProvider
+ for _, v := range g.Vertices() {
+ // Only care about GraphNodeAttachProvider implementations
+ apn, ok := v.(GraphNodeAttachProvider)
+ if !ok {
+ continue
+ }
+
+ // Determine what we're looking for
+ path := normalizeModulePath(apn.Path())
+ path = path[1:]
+ name := apn.ProviderName()
+ log.Printf("[TRACE] Attach provider request: %#v %s", path, name)
+
+ // Get the configuration.
+ tree := t.Module.Child(path)
+ if tree == nil {
+ continue
+ }
+
+ // Go through the provider configs to find the matching config
+ for _, p := range tree.Config().ProviderConfigs {
+ // Build the name, which is "name.alias" if an alias exists
+ current := p.Name
+ if p.Alias != "" {
+ current += "." + p.Alias
+ }
+
+ // If the configs match then attach!
+ if current == name {
+ log.Printf("[TRACE] Attaching provider config: %#v", p)
+ apn.AttachProvider(p)
+ break
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
new file mode 100644
index 00000000..f2ee37e5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
@@ -0,0 +1,78 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes
+// that want resource configurations attached.
+type GraphNodeAttachResourceConfig interface {
+ // ResourceAddr is the address to the resource
+ ResourceAddr() *ResourceAddress
+
+ // Sets the configuration
+ AttachResourceConfig(*config.Resource)
+}
+
+// AttachResourceConfigTransformer goes through the graph and attaches
+// resource configuration structures to nodes that implement the interfaces
+// above.
+//
+// The attached configuration structures are directly from the configuration.
+// If they're going to be modified, a copy should be made.
+type AttachResourceConfigTransformer struct {
+ Module *module.Tree // Module is the root module for the config
+}
+
+func (t *AttachResourceConfigTransformer) Transform(g *Graph) error {
+ log.Printf("[TRACE] AttachResourceConfigTransformer: Beginning...")
+
+ // Go through and find GraphNodeAttachResource
+ for _, v := range g.Vertices() {
+ // Only care about GraphNodeAttachResource implementations
+ arn, ok := v.(GraphNodeAttachResourceConfig)
+ if !ok {
+ continue
+ }
+
+ // Determine what we're looking for
+ addr := arn.ResourceAddr()
+ log.Printf(
+ "[TRACE] AttachResourceConfigTransformer: Attach resource "+
+ "config request: %s", addr)
+
+ // Get the configuration.
+ path := normalizeModulePath(addr.Path)
+ path = path[1:]
+ tree := t.Module.Child(path)
+ if tree == nil {
+ continue
+ }
+
+ // Go through the resource configs to find the matching config
+ for _, r := range tree.Config().Resources {
+ // Get a resource address so we can compare
+ a, err := parseResourceAddressConfig(r)
+ if err != nil {
+ panic(fmt.Sprintf(
+ "Error parsing config address, this is a bug: %#v", r))
+ }
+ a.Path = addr.Path
+
+ // If this is not the same resource, then continue
+ if !a.Equals(addr) {
+ continue
+ }
+
+ log.Printf("[TRACE] Attaching resource config: %#v", r)
+ arn.AttachResourceConfig(r)
+ break
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
new file mode 100644
index 00000000..564ff08f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
@@ -0,0 +1,68 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeAttachResourceState is an interface that can be implemented
+// to request that a ResourceState is attached to the node.
+type GraphNodeAttachResourceState interface {
+ // The address to the resource for the state
+ ResourceAddr() *ResourceAddress
+
+ // Sets the state
+ AttachResourceState(*ResourceState)
+}
+
+// AttachStateTransformer goes through the graph and attaches
+// state to nodes that implement the interfaces above.
+type AttachStateTransformer struct {
+ State *State // State is the root state
+}
+
+func (t *AttachStateTransformer) Transform(g *Graph) error {
+ // If no state, then nothing to do
+ if t.State == nil {
+ log.Printf("[DEBUG] Not attaching any state: state is nil")
+ return nil
+ }
+
+ filter := &StateFilter{State: t.State}
+ for _, v := range g.Vertices() {
+ // Only care about nodes requesting that state be attached
+ an, ok := v.(GraphNodeAttachResourceState)
+ if !ok {
+ continue
+ }
+ addr := an.ResourceAddr()
+
+ // Get the module state
+ results, err := filter.Filter(addr.String())
+ if err != nil {
+ return err
+ }
+
+ // Attach the first resource state we get
+ found := false
+ for _, result := range results {
+ if rs, ok := result.Value.(*ResourceState); ok {
+ log.Printf(
+ "[DEBUG] Attaching resource state to %q: %#v",
+ dag.VertexName(v), rs)
+ an.AttachResourceState(rs)
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ log.Printf(
+ "[DEBUG] Resource state not found for %q: %s",
+ dag.VertexName(v), addr)
+ }
+ }
+
+ return nil
+}
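+
+// For illustration (hypothetical address): for a node whose ResourceAddr()
+// renders as "aws_instance.foo", filter.Filter("aws_instance.foo") returns
+// the matching state entries, and the first *ResourceState found among
+// them is attached.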
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
new file mode 100644
index 00000000..61bce853
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
@@ -0,0 +1,135 @@
+package terraform
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "sync"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ConfigTransformer is a GraphTransformer that adds all the resources
+// from the configuration to the graph.
+//
+// The module used to configure this transformer must be the root module.
+//
+// Only resources are added to the graph. Variables, outputs, and
+// providers must be added via other transforms.
+//
+// Unlike ConfigTransformerOld, this transformer creates a graph with
+// all resources including module resources, rather than creating module
+// nodes that are then "flattened".
+type ConfigTransformer struct {
+ Concrete ConcreteResourceNodeFunc
+
+ // Module is the module to add resources from.
+ Module *module.Tree
+
+ // Unique will only add resources that aren't already present in the graph.
+ Unique bool
+
+ // ModeFilter, if true, will only add resources that match the given Mode
+ ModeFilter bool
+ Mode config.ResourceMode
+
+ l sync.Mutex
+ uniqueMap map[string]struct{}
+}
+
+func (t *ConfigTransformer) Transform(g *Graph) error {
+ // Lock since we use some internal state
+ t.l.Lock()
+ defer t.l.Unlock()
+
+ // If no module is given, we don't do anything
+ if t.Module == nil {
+ return nil
+ }
+
+ // If the module isn't loaded, that is simply an error
+ if !t.Module.Loaded() {
+ return errors.New("module must be loaded for ConfigTransformer")
+ }
+
+ // Reset the uniqueness map. If we're tracking uniques, then populate
+ // it with addresses.
+ t.uniqueMap = make(map[string]struct{})
+ defer func() { t.uniqueMap = nil }()
+ if t.Unique {
+ for _, v := range g.Vertices() {
+ if rn, ok := v.(GraphNodeResource); ok {
+ t.uniqueMap[rn.ResourceAddr().String()] = struct{}{}
+ }
+ }
+ }
+
+ // Start the transformation process
+ return t.transform(g, t.Module)
+}
+
+func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error {
+ // If no config, do nothing
+ if m == nil {
+ return nil
+ }
+
+ // Add our resources
+ if err := t.transformSingle(g, m); err != nil {
+ return err
+ }
+
+ // Transform all the children.
+ for _, c := range m.Children() {
+ if err := t.transform(g, c); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (t *ConfigTransformer) transformSingle(g *Graph, m *module.Tree) error {
+ log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", m.Path())
+
+ // Get the configuration for this module
+ conf := m.Config()
+
+ // Build the path we're at
+ path := m.Path()
+
+ // Write all the resources out
+ for _, r := range conf.Resources {
+ // Build the resource address
+ addr, err := parseResourceAddressConfig(r)
+ if err != nil {
+ panic(fmt.Sprintf(
+ "Error parsing config address, this is a bug: %#v", r))
+ }
+ addr.Path = path
+
+ // If this is already in our uniqueness map, don't add it again
+ if _, ok := t.uniqueMap[addr.String()]; ok {
+ continue
+ }
+
+ // Skip resources whose mode doesn't match the filter
+ if t.ModeFilter && addr.Mode != t.Mode {
+ continue
+ }
+
+ // Build the abstract node and the concrete one
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ // Add it to the graph
+ g.Add(node)
+ }
+
+ return nil
+}
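+
+// A sketch of typical use (rootModule and g are placeholders for a loaded
+// module tree and an existing graph):
+//
+//     t := &ConfigTransformer{Module: rootModule, Unique: true}
+//     if err := t.Transform(g); err != nil {
+//         // handle error
+//     }
+//
+// Afterwards every resource in rootModule and its children is present in
+// g, except addresses that were already represented (because Unique is set).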
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
new file mode 100644
index 00000000..92f9888d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
@@ -0,0 +1,80 @@
+package terraform
+
+import (
+ "errors"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// FlatConfigTransformer is a GraphTransformer that adds the configuration
+// to the graph. The module used to configure this transformer must be
+// the root module.
+//
+// This transform adds the nodes but doesn't connect any of the references.
+// The ReferenceTransformer should be used for that.
+//
+// NOTE: In relation to ConfigTransformer: this is a newer generation config
+// transformer. It puts the _entire_ config into the graph (there is no
+// "flattening" step as before).
+type FlatConfigTransformer struct {
+ Concrete ConcreteResourceNodeFunc // What to turn resources into
+
+ Module *module.Tree
+}
+
+func (t *FlatConfigTransformer) Transform(g *Graph) error {
+ // If no module, we do nothing
+ if t.Module == nil {
+ return nil
+ }
+
+ // If the module is not loaded, that is an error
+ if !t.Module.Loaded() {
+ return errors.New("module must be loaded")
+ }
+
+ return t.transform(g, t.Module)
+}
+
+func (t *FlatConfigTransformer) transform(g *Graph, m *module.Tree) error {
+ // If no module, no problem
+ if m == nil {
+ return nil
+ }
+
+ // Transform all the children.
+ for _, c := range m.Children() {
+ if err := t.transform(g, c); err != nil {
+ return err
+ }
+ }
+
+ // Get the configuration for this module
+ config := m.Config()
+
+ // Write all the resources out
+ for _, r := range config.Resources {
+ // Grab the address for this resource
+ addr, err := parseResourceAddressConfig(r)
+ if err != nil {
+ return err
+ }
+ addr.Path = m.Path()
+
+ // Build the abstract resource. We have the config already so
+ // we'll just pre-populate that.
+ abstract := &NodeAbstractResource{
+ Addr: addr,
+ Config: r,
+ }
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ g.Add(node)
+ }
+
+ return nil
+}
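+
+// The Concrete hook decides which vertex actually lands in the graph. A
+// minimal sketch (someConcreteResourceNode is hypothetical; it embeds the
+// abstract node, the same pattern used by the concrete node types in this
+// package):
+//
+//     t := &FlatConfigTransformer{
+//         Module: rootModule,
+//         Concrete: func(a *NodeAbstractResource) dag.Vertex {
+//             return &someConcreteResourceNode{NodeAbstractResource: a}
+//         },
+//     }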
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go
new file mode 100644
index 00000000..ec412582
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go
@@ -0,0 +1,23 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/config"
+)
+
+// varNameForVar returns the VarName value for an interpolated variable.
+// This value is compared to the VarName() value for the nodes within the
+// graph to build the graph edges.
+func varNameForVar(raw config.InterpolatedVariable) string {
+ switch v := raw.(type) {
+ case *config.ModuleVariable:
+ return fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)
+ case *config.ResourceVariable:
+ return v.ResourceId()
+ case *config.UserVariable:
+ return fmt.Sprintf("var.%s", v.Name)
+ default:
+ return ""
+ }
+}
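+
+// For example (hypothetical variables): a *config.ModuleVariable for
+// module "net" and field "vpc_id" yields "module.net.output.vpc_id", and a
+// *config.UserVariable named "region" yields "var.region"; a
+// *config.ResourceVariable yields whatever its ResourceId() returns.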
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
new file mode 100644
index 00000000..83415f35
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
@@ -0,0 +1,28 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/dag"
+)
+
+// CountBoundaryTransformer adds a node that depends on everything else
+// so that it runs last in order to clean up the state for nodes that
+// are on the "count boundary": "foo.0" when only one exists becomes "foo"
+type CountBoundaryTransformer struct{}
+
+func (t *CountBoundaryTransformer) Transform(g *Graph) error {
+ node := &NodeCountBoundary{}
+ g.Add(node)
+
+ // Depends on everything
+ for _, v := range g.Vertices() {
+ // Don't connect to ourselves
+ if v == node {
+ continue
+ }
+
+ // Connect!
+ g.Connect(dag.BasicEdge(node, v))
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
new file mode 100644
index 00000000..2148cef4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
@@ -0,0 +1,168 @@
+package terraform
+
+import "fmt"
+
+// DeposedTransformer is a GraphTransformer that adds deposed resources
+// to the graph.
+type DeposedTransformer struct {
+ // State is the global state. We'll automatically find the correct
+ // ModuleState based on the Graph.Path that is being transformed.
+ State *State
+
+ // View, if non-empty, is the view (ModuleState.View) applied to the
+ // module state before looking for deposed resources.
+ View string
+}
+
+func (t *DeposedTransformer) Transform(g *Graph) error {
+ state := t.State.ModuleByPath(g.Path)
+ if state == nil {
+ // If there is no state for our module there can't be any deposed
+ // resources, since they live in the state.
+ return nil
+ }
+
+ // If we have a view, apply it now
+ if t.View != "" {
+ state = state.View(t.View)
+ }
+
+ // Go through all the resources in our state to look for deposed resources
+ for k, rs := range state.Resources {
+ // If we have no deposed resources, then move on
+ if len(rs.Deposed) == 0 {
+ continue
+ }
+ deposed := rs.Deposed
+
+ for i := range deposed {
+ g.Add(&graphNodeDeposedResource{
+ Index: i,
+ ResourceName: k,
+ ResourceType: rs.Type,
+ Provider: rs.Provider,
+ })
+ }
+ }
+
+ return nil
+}
+
+// graphNodeDeposedResource is the graph vertex representing a deposed resource.
+type graphNodeDeposedResource struct {
+ Index int
+ ResourceName string
+ ResourceType string
+ Provider string
+}
+
+func (n *graphNodeDeposedResource) Name() string {
+ return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index)
+}
+
+func (n *graphNodeDeposedResource) ProvidedBy() []string {
+ return []string{resourceProvider(n.ResourceName, n.Provider)}
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeDeposedResource) EvalTree() EvalNode {
+ var provider ResourceProvider
+ var state *InstanceState
+
+ seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)}
+
+ // Build instance info
+ info := &InstanceInfo{Id: n.Name(), Type: n.ResourceType}
+ seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info})
+
+ // Refresh the resource
+ seq.Nodes = append(seq.Nodes, &EvalOpFilter{
+ Ops: []walkOperation{walkRefresh},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadStateDeposed{
+ Name: n.ResourceName,
+ Output: &state,
+ Index: n.Index,
+ },
+ &EvalRefresh{
+ Info: info,
+ Provider: &provider,
+ State: &state,
+ Output: &state,
+ },
+ &EvalWriteStateDeposed{
+ Name: n.ResourceName,
+ ResourceType: n.ResourceType,
+ Provider: n.Provider,
+ State: &state,
+ Index: n.Index,
+ },
+ },
+ },
+ })
+
+ // Apply
+ var diff *InstanceDiff
+ var err error
+ seq.Nodes = append(seq.Nodes, &EvalOpFilter{
+ Ops: []walkOperation{walkApply, walkDestroy},
+ Node: &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalReadStateDeposed{
+ Name: n.ResourceName,
+ Output: &state,
+ Index: n.Index,
+ },
+ &EvalDiffDestroy{
+ Info: info,
+ State: &state,
+ Output: &diff,
+ },
+ // Call pre-apply hook
+ &EvalApplyPre{
+ Info: info,
+ State: &state,
+ Diff: &diff,
+ },
+ &EvalApply{
+ Info: info,
+ State: &state,
+ Diff: &diff,
+ Provider: &provider,
+ Output: &state,
+ Error: &err,
+ },
+ // Always write the resource back to the state deposed... if it
+ // was successfully destroyed it will be pruned. If it was not, it will
+ // be caught on the next run.
+ &EvalWriteStateDeposed{
+ Name: n.ResourceName,
+ ResourceType: n.ResourceType,
+ Provider: n.Provider,
+ State: &state,
+ Index: n.Index,
+ },
+ &EvalApplyPost{
+ Info: info,
+ State: &state,
+ Error: &err,
+ },
+ &EvalReturnError{
+ Error: &err,
+ },
+ &EvalUpdateStateHook{},
+ },
+ },
+ })
+
+ return seq
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
new file mode 100644
index 00000000..edfb460b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
@@ -0,0 +1,257 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeDestroyerCBD must be implemented by nodes that might be
+// create-before-destroy destroyers.
+type GraphNodeDestroyerCBD interface {
+ GraphNodeDestroyer
+
+ // CreateBeforeDestroy returns true if this node represents a node
+ // that is doing a CBD.
+ CreateBeforeDestroy() bool
+
+ // ModifyCreateBeforeDestroy is called when the CBD state of a node
+ // is changed dynamically. This can return an error if this isn't
+ // allowed.
+ ModifyCreateBeforeDestroy(bool) error
+}
+
+// CBDEdgeTransformer modifies the edges of CBD nodes that went through
+// the DestroyEdgeTransformer to have the right dependencies. There are
+// two real tasks here:
+//
+// 1. With CBD, the destroy edge is inverted: the destroy depends on
+// the creation.
+//
+// 2. A_d must depend on resources that depend on A. This is to enable
+// the destroy to only happen once nodes that depend on A successfully
+// update to A. Example: adding a web server updates the load balancer
+// before deleting the old web server.
+//
+type CBDEdgeTransformer struct {
+ // Module and State are only needed to look up dependencies in
+ // any way possible. Either can be nil if not available.
+ Module *module.Tree
+ State *State
+}
+
+func (t *CBDEdgeTransformer) Transform(g *Graph) error {
+ log.Printf("[TRACE] CBDEdgeTransformer: Beginning CBD transformation...")
+
+ // Go through and reverse any destroy edges
+ destroyMap := make(map[string][]dag.Vertex)
+ for _, v := range g.Vertices() {
+ dn, ok := v.(GraphNodeDestroyerCBD)
+ if !ok {
+ continue
+ }
+
+ if !dn.CreateBeforeDestroy() {
+ // If there are no CBD ancestors (dependent nodes), then we
+ // do nothing here.
+ if !t.hasCBDAncestor(g, v) {
+ continue
+ }
+
+ // If this isn't naturally a CBD node, this means that an ancestor is
+ // and we need to auto-upgrade this node to CBD. We do this because
+ // a CBD node depending on non-CBD will result in cycles. To avoid this,
+ // we always attempt to upgrade it.
+ if err := dn.ModifyCreateBeforeDestroy(true); err != nil {
+ return fmt.Errorf(
+ "%s: must have create before destroy enabled because "+
+ "a dependent resource has CBD enabled. However, when "+
+ "attempting to automatically do this, an error occurred: %s",
+ dag.VertexName(v), err)
+ }
+ }
+
+ // Find the destroy edge. There should only be one.
+ for _, e := range g.EdgesTo(v) {
+ // Not a destroy edge, ignore it
+ de, ok := e.(*DestroyEdge)
+ if !ok {
+ continue
+ }
+
+ log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s",
+ dag.VertexName(de.Source()), dag.VertexName(de.Target()))
+
+ // Found it! Invert.
+ g.RemoveEdge(de)
+ g.Connect(&DestroyEdge{S: de.Target(), T: de.Source()})
+ }
+
+ // If the address has an index, we strip that. Our depMap creation
+ // graph doesn't expand counts so we don't currently get _exact_
+ // dependencies. One day when we limit dependencies more exactly
+ // this will have to change. We have a test case covering this
+ // (depNonCBDCountBoth) so it'll be caught.
+ addr := dn.DestroyAddr()
+ if addr.Index >= 0 {
+ addr = addr.Copy() // Copy so that we don't modify any pointers
+ addr.Index = -1
+ }
+
+ // Add this to the list of nodes that we need to fix up
+ // the edges for (step 2 above in the docs).
+ key := addr.String()
+ destroyMap[key] = append(destroyMap[key], v)
+ }
+
+ // If we have no CBD nodes, then our work here is done
+ if len(destroyMap) == 0 {
+ return nil
+ }
+
+ // We have CBD nodes. We now have to move on to the much more difficult
+ // task of connecting dependencies of the creation side of the destroy
+ // to the destruction node. The easiest way to explain this is an example:
+ //
+ // Given a pre-destroy dependence of: A => B
+ // And A has CBD set.
+ //
+ // The resulting graph should be: A => B => A_d
+ //
+ // The key here is that B happens before A is destroyed. This is to
+ // facilitate the primary purpose for CBD: making sure that downstreams
+ // are properly updated to avoid downtime before the resource is destroyed.
+ //
+ // We can't trust that the resource being destroyed or anything that
+ // depends on it is actually in our current graph so we make a new
+ // graph in order to determine those dependencies and add them in.
+ log.Printf("[TRACE] CBDEdgeTransformer: building graph to find dependencies...")
+ depMap, err := t.depMap(destroyMap)
+ if err != nil {
+ return err
+ }
+
+ // We now have the mapping of resource addresses to the destroy
+ // nodes they need to depend on. We now go through our own vertices to
+ // find any matching these addresses and make the connection.
+ for _, v := range g.Vertices() {
+ // We're looking for creators
+ rn, ok := v.(GraphNodeCreator)
+ if !ok {
+ continue
+ }
+
+ // Get the address
+ addr := rn.CreateAddr()
+
+ // If the address has an index, we strip that. Our depMap creation
+ // graph doesn't expand counts so we don't currently get _exact_
+ // dependencies. One day when we limit dependencies more exactly
+ // this will have to change. We have a test case covering this
+ // (depNonCBDCount) so it'll be caught.
+ if addr.Index >= 0 {
+ addr = addr.Copy() // Copy so that we don't modify any pointers
+ addr.Index = -1
+ }
+
+ // If there is nothing this resource should depend on, ignore it
+ key := addr.String()
+ dns, ok := depMap[key]
+ if !ok {
+ continue
+ }
+
+ // We have nodes! Make the connection
+ for _, dn := range dns {
+ log.Printf("[TRACE] CBDEdgeTransformer: destroy depends on dependence: %s => %s",
+ dag.VertexName(dn), dag.VertexName(v))
+ g.Connect(dag.BasicEdge(dn, v))
+ }
+ }
+
+ return nil
+}
+
+func (t *CBDEdgeTransformer) depMap(
+ destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) {
+ // Build the graph of our config, this ensures that all resources
+ // are present in the graph.
+ g, err := (&BasicGraphBuilder{
+ Steps: []GraphTransformer{
+ &FlatConfigTransformer{Module: t.Module},
+ &AttachResourceConfigTransformer{Module: t.Module},
+ &AttachStateTransformer{State: t.State},
+ &ReferenceTransformer{},
+ },
+ Name: "CBDEdgeTransformer",
+ }).Build(nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Using this graph, build the list of destroy nodes that each resource
+ // address should depend on. For example, when we find B, we map the
+ // address of B to A_d in the "depMap" variable below.
+ depMap := make(map[string][]dag.Vertex)
+ for _, v := range g.Vertices() {
+ // We're looking for resources.
+ rn, ok := v.(GraphNodeResource)
+ if !ok {
+ continue
+ }
+
+ // Get the address
+ addr := rn.ResourceAddr()
+ key := addr.String()
+
+ // Get the destroy nodes that are destroying this resource.
+ // If there aren't any, then we don't need to worry about
+ // any connections.
+ dns, ok := destroyMap[key]
+ if !ok {
+ continue
+ }
+
+ // Get the nodes that depend on this one. In the example above:
+ // finding B in A => B.
+ for _, v := range g.UpEdges(v).List() {
+ // We're looking for resources.
+ rn, ok := v.(GraphNodeResource)
+ if !ok {
+ continue
+ }
+
+ // Keep track of the destroy nodes that this address
+ // needs to depend on.
+ key := rn.ResourceAddr().String()
+ depMap[key] = append(depMap[key], dns...)
+ }
+ }
+
+ return depMap, nil
+}
+
+// hasCBDAncestor returns true if any ancestor (node that depends on this)
+// has CBD set.
+func (t *CBDEdgeTransformer) hasCBDAncestor(g *Graph, v dag.Vertex) bool {
+ s, _ := g.Ancestors(v)
+ if s == nil {
+ return true
+ }
+
+ for _, v := range s.List() {
+ dn, ok := v.(GraphNodeDestroyerCBD)
+ if !ok {
+ continue
+ }
+
+ if dn.CreateBeforeDestroy() {
+ // some ancestor is CreateBeforeDestroy, so we need to follow suit
+ return true
+ }
+ }
+
+ return false
+}
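+
+// A sketch of where this fits in a list of build steps (mod and state are
+// placeholders; per the doc comment above, this runs on the edges produced
+// by the DestroyEdgeTransformer):
+//
+//     steps := []GraphTransformer{
+//         &DestroyEdgeTransformer{Module: mod, State: state},
+//         &CBDEdgeTransformer{Module: mod, State: state},
+//     }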
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
new file mode 100644
index 00000000..22be1ab6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
@@ -0,0 +1,269 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeDestroyer must be implemented by nodes that destroy resources.
+type GraphNodeDestroyer interface {
+ dag.Vertex
+
+ // DestroyAddr is the address of the resource that is being
+ // destroyed by this node. If this returns nil, then this node
+ // is not destroying anything.
+ DestroyAddr() *ResourceAddress
+}
+
+// GraphNodeCreator must be implemented by nodes that create OR update resources.
+type GraphNodeCreator interface {
+ // CreateAddr is the address of the resource being created or updated
+ CreateAddr() *ResourceAddress
+}
+
+// DestroyEdgeTransformer is a GraphTransformer that creates the proper
+// references for destroy resources. Destroy resources are more complex
+// in that they must depend on the destruction of resources that
+// in turn depend on the CREATION of the node being destroyed.
+//
+// That is complicated. Visually:
+//
+// B_d -> A_d -> A -> B
+//
+// Notice that A destroy depends on B destroy, while B create depends on
+// A create. They're inverted. This must be done for example because often
+// dependent resources will block parent resources from deleting. Concrete
+// example: VPC with subnets, the VPC can't be deleted while there are
+// still subnets.
+type DestroyEdgeTransformer struct {
+ // These are needed to properly build the graph of dependencies
+ // to determine what a destroy node depends on. Any of these can be nil.
+ Module *module.Tree
+ State *State
+}
+
+func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
+ log.Printf("[TRACE] DestroyEdgeTransformer: Beginning destroy edge transformation...")
+
+ // Build a map of what is being destroyed (by address string) to
+ // the list of destroyers. In general there will only be one destroyer
+ // but to make it more robust we support multiple.
+ destroyers := make(map[string][]GraphNodeDestroyer)
+ for _, v := range g.Vertices() {
+ dn, ok := v.(GraphNodeDestroyer)
+ if !ok {
+ continue
+ }
+
+ addr := dn.DestroyAddr()
+ if addr == nil {
+ continue
+ }
+
+ key := addr.String()
+ log.Printf(
+ "[TRACE] DestroyEdgeTransformer: %s destroying %q",
+ dag.VertexName(dn), key)
+ destroyers[key] = append(destroyers[key], dn)
+ }
+
+ // If we aren't destroying anything, there will be no edges to make
+ // so just exit early and avoid future work.
+ if len(destroyers) == 0 {
+ return nil
+ }
+
+ // Go through and connect creators to destroyers. Going along with
+ // our example, this makes: A_d => A
+ for _, v := range g.Vertices() {
+ cn, ok := v.(GraphNodeCreator)
+ if !ok {
+ continue
+ }
+
+ addr := cn.CreateAddr()
+ if addr == nil {
+ continue
+ }
+
+ key := addr.String()
+ ds := destroyers[key]
+ if len(ds) == 0 {
+ continue
+ }
+
+ for _, d := range ds {
+ // For illustrating our example
+ a_d := d.(dag.Vertex)
+ a := v
+
+ log.Printf(
+ "[TRACE] DestroyEdgeTransformer: connecting creator/destroyer: %s, %s",
+ dag.VertexName(a), dag.VertexName(a_d))
+
+ g.Connect(&DestroyEdge{S: a, T: a_d})
+ }
+ }
+
+ // This is strange but is the easiest way to get the dependencies
+ // of a node that is being destroyed. We use another graph to make sure
+ // the resource is in the graph and ask for references. We have to do this
+ // because the node that is being destroyed may NOT be in the graph.
+ //
+ // Example: resource A is force new, then destroy A AND create A are
+ // in the graph. BUT if resource A is just pure destroy, then only
+ // destroy A is in the graph, and create A is not.
+ providerFn := func(a *NodeAbstractProvider) dag.Vertex {
+ return &NodeApplyableProvider{NodeAbstractProvider: a}
+ }
+ steps := []GraphTransformer{
+ // Add outputs and metadata
+ &OutputTransformer{Module: t.Module},
+ &AttachResourceConfigTransformer{Module: t.Module},
+ &AttachStateTransformer{State: t.State},
+
+ // Add providers since they can affect destroy order as well
+ &MissingProviderTransformer{AllowAny: true, Concrete: providerFn},
+ &ProviderTransformer{},
+ &DisableProviderTransformer{},
+ &ParentProviderTransformer{},
+ &AttachProviderConfigTransformer{Module: t.Module},
+
+ // Add all the variables. We can depend on resources through
+ // variables due to module parameters, and we need to properly
+ // determine that.
+ &RootVariableTransformer{Module: t.Module},
+ &ModuleVariableTransformer{Module: t.Module},
+
+ &ReferenceTransformer{},
+ }
+
+ // Go through all the nodes being destroyed and create a graph.
+ // The resulting graph is only of things being CREATED. For example,
+ // following our example, the resulting graph would be:
+ //
+ // A, B (with no edges)
+ //
+ var tempG Graph
+ var tempDestroyed []dag.Vertex
+ for d := range destroyers {
+ // d is what is being destroyed. We parse the resource address
+ // it came from; it is a panic if this fails.
+ addr, err := ParseResourceAddress(d)
+ if err != nil {
+ panic(err)
+ }
+
+ // This part is a little bit weird but is the best way to
+ // find the dependencies we need to: build a graph and use the
+ // attach config and state transformers then ask for references.
+ abstract := &NodeAbstractResource{Addr: addr}
+ tempG.Add(abstract)
+ tempDestroyed = append(tempDestroyed, abstract)
+
+ // We also add the destroy version here since the destroy can
+ // depend on things that the creation doesn't (destroy provisioners).
+ destroy := &NodeDestroyResource{NodeAbstractResource: abstract}
+ tempG.Add(destroy)
+ tempDestroyed = append(tempDestroyed, destroy)
+ }
+
+ // Run the graph transforms so we have the information we need to
+ // build references.
+ for _, s := range steps {
+ if err := s.Transform(&tempG); err != nil {
+ return err
+ }
+ }
+
+ log.Printf("[TRACE] DestroyEdgeTransformer: reference graph: %s", tempG.String())
+
+ // Go through all the nodes in the graph and determine what they
+ // depend on.
+ for _, v := range tempDestroyed {
+ // Find all ancestors of this to determine the edges we'll depend on
+ vs, err := tempG.Ancestors(v)
+ if err != nil {
+ return err
+ }
+
+ refs := make([]dag.Vertex, 0, vs.Len())
+ for _, raw := range vs.List() {
+ refs = append(refs, raw.(dag.Vertex))
+ }
+
+ refNames := make([]string, len(refs))
+ for i, ref := range refs {
+ refNames[i] = dag.VertexName(ref)
+ }
+ log.Printf(
+ "[TRACE] DestroyEdgeTransformer: creation node %q references %s",
+ dag.VertexName(v), refNames)
+
+ // If we have no references, then we won't need to do anything
+ if len(refs) == 0 {
+ continue
+ }
+
+ // Get the destroy node for this. In our running example,
+ // we are currently at B and we're looking for B_d.
+ rn, ok := v.(GraphNodeResource)
+ if !ok {
+ continue
+ }
+
+ addr := rn.ResourceAddr()
+ if addr == nil {
+ continue
+ }
+
+ dns := destroyers[addr.String()]
+
+ // We have dependencies, check if any are being destroyed
+ // to build the list of things that we must depend on!
+ //
+ // In our running example, if we have:
+ //
+ // B_d => A_d => A => B
+ //
+ // Then at this point in the algorithm we started with B_d,
+ // we built B (to get dependencies), and we found A. We're now looking
+ // to see if A_d exists.
+ var depDestroyers []dag.Vertex
+ for _, v := range refs {
+ rn, ok := v.(GraphNodeResource)
+ if !ok {
+ continue
+ }
+
+ addr := rn.ResourceAddr()
+ if addr == nil {
+ continue
+ }
+
+ key := addr.String()
+ if ds, ok := destroyers[key]; ok {
+ for _, d := range ds {
+ depDestroyers = append(depDestroyers, d.(dag.Vertex))
+ log.Printf(
+ "[TRACE] DestroyEdgeTransformer: destruction of %q depends on %s",
+ key, dag.VertexName(d))
+ }
+ }
+ }
+
+ // Go through and make the connections. Use the variable
+ // names "a_d" and "b_d" to reference our example.
+ for _, a_d := range dns {
+ for _, b_d := range depDestroyers {
+ if b_d != a_d {
+ g.Connect(dag.BasicEdge(b_d, a_d))
+ }
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
new file mode 100644
index 00000000..ad46d3c6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
@@ -0,0 +1,86 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// DiffTransformer is a GraphTransformer that adds the elements of
+// the diff to the graph.
+//
+// This transform is used for example by the ApplyGraphBuilder to ensure
+// that only resources that are being modified are represented in the graph.
+//
+// Module and State are still required by the DiffTransformer for annotations
+// since the Diff doesn't contain all the information required to build the
+// complete graph (such as create-before-destroy information). The graph
+// is built based on the diff first, though, ensuring that only resources
+// that are being modified are present in the graph.
+type DiffTransformer struct {
+ Concrete ConcreteResourceNodeFunc
+
+ Diff *Diff
+ Module *module.Tree
+ State *State
+}
+
+func (t *DiffTransformer) Transform(g *Graph) error {
+ // If the diff is nil or empty (nil is empty) then do nothing
+ if t.Diff.Empty() {
+ return nil
+ }
+
+ // Go through all the modules in the diff.
+ log.Printf("[TRACE] DiffTransformer: starting")
+ var nodes []dag.Vertex
+ for _, m := range t.Diff.Modules {
+ log.Printf("[TRACE] DiffTransformer: Module: %s", m)
+ // TODO: If this is a destroy diff then add a module destroy node
+
+ // Go through all the resources in this module.
+ for name, inst := range m.Resources {
+ log.Printf("[TRACE] DiffTransformer: Resource %q: %#v", name, inst)
+
+ // We have changes! This is a create or update operation.
+ // First grab the address so we have a unique way to
+ // reference this resource.
+ addr, err := parseResourceAddressInternal(name)
+ if err != nil {
+ panic(fmt.Sprintf(
+ "Error parsing internal name, this is a bug: %q", name))
+ }
+
+ // Very important: add the module path for this resource to
+ // the address. Remove "root" from it.
+ addr.Path = m.Path[1:]
+
+ // If we're destroying, add the destroy node
+ if inst.Destroy || inst.GetDestroyDeposed() {
+ abstract := &NodeAbstractResource{Addr: addr}
+ g.Add(&NodeDestroyResource{NodeAbstractResource: abstract})
+ }
+
+ // If we have changes, then add the applyable version
+ if len(inst.Attributes) > 0 {
+ // Add the resource to the graph
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ nodes = append(nodes, node)
+ }
+ }
+ }
+
+ // Add all the nodes to the graph
+ for _, n := range nodes {
+ g.Add(n)
+ }
+
+ return nil
+}
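+
+// A sketch of typical use (diff, mod, state, and g are placeholders):
+//
+//     t := &DiffTransformer{Diff: diff, Module: mod, State: state}
+//     if err := t.Transform(g); err != nil {
+//         // handle error
+//     }
+//
+// Only resources with attribute changes, plus destroy nodes for destroyed
+// or deposed instances, end up in g.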
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
new file mode 100644
index 00000000..982c098b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
@@ -0,0 +1,48 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeExpandable is an interface that nodes can implement to
+// signal that they can be expanded. Expanded nodes turn into
+// GraphNodeSubgraph nodes within the graph.
+type GraphNodeExpandable interface {
+ Expand(GraphBuilder) (GraphNodeSubgraph, error)
+}
+
+// GraphNodeDynamicExpandable is an interface that nodes can implement
+// to signal that they can be expanded at eval-time (hence dynamic).
+// These nodes are given the eval context and are expected to return
+// a new subgraph.
+type GraphNodeDynamicExpandable interface {
+ DynamicExpand(EvalContext) (*Graph, error)
+}
+
+// GraphNodeSubgraph is an interface a node can implement if it has
+// a larger subgraph that should be walked.
+type GraphNodeSubgraph interface {
+ Subgraph() dag.Grapher
+}
+
+// ExpandTransform is a transformer that does a subgraph expansion
+// at graph transform time (vs. at eval time). The benefit of earlier
+// subgraph expansion is that errors with the graph build can be detected
+// at an earlier stage.
+type ExpandTransform struct {
+ Builder GraphBuilder
+}
+
+func (t *ExpandTransform) Transform(v dag.Vertex) (dag.Vertex, error) {
+ ev, ok := v.(GraphNodeExpandable)
+ if !ok {
+ // This isn't an expandable vertex, so just ignore it.
+ return v, nil
+ }
+
+ // Expand the subgraph!
+ log.Printf("[DEBUG] vertex %q: static expanding", dag.VertexName(ev))
+ return ev.Expand(t.Builder)
+}
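+
+// A minimal sketch of a dynamically expandable node (exampleExpander is
+// hypothetical; compare graphNodeImportState.DynamicExpand elsewhere in
+// this package):
+//
+//     type exampleExpander struct{}
+//
+//     func (n *exampleExpander) DynamicExpand(ctx EvalContext) (*Graph, error) {
+//         g := &Graph{Path: ctx.Path()}
+//         // ... add per-instance vertices to g ...
+//         return g, nil
+//     }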
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
new file mode 100644
index 00000000..3673771c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
@@ -0,0 +1,38 @@
+package terraform
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ImportProviderValidateTransformer is a GraphTransformer that goes through
+// the providers in the graph and validates that they only depend on variables.
+type ImportProviderValidateTransformer struct{}
+
+func (t *ImportProviderValidateTransformer) Transform(g *Graph) error {
+ for _, v := range g.Vertices() {
+ // We only care about providers
+ pv, ok := v.(GraphNodeProvider)
+ if !ok {
+ continue
+ }
+
+ // We only care about providers that reference things
+ rn, ok := pv.(GraphNodeReferencer)
+ if !ok {
+ continue
+ }
+
+ for _, ref := range rn.References() {
+ if !strings.HasPrefix(ref, "var.") {
+ return fmt.Errorf(
+ "Provider %q depends on non-var %q. Providers for import can currently\n"+
+ "only depend on variables or must be hardcoded. You can stop import\n"+
+ "from loading configurations by specifying `-config=\"\"`.",
+ pv.ProviderName(), ref)
+ }
+ }
+ }
+
+ return nil
+}
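+
+// For illustration (hypothetical references): a provider whose
+// References() contains only "var.region" passes this check, while one
+// referencing "aws_instance.web" fails with the error above.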
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
new file mode 100644
index 00000000..081df2f8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
@@ -0,0 +1,241 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// ImportStateTransformer is a GraphTransformer that adds nodes to the
+// graph to represent the imports we want to do for resources.
+type ImportStateTransformer struct {
+ Targets []*ImportTarget
+}
+
+func (t *ImportStateTransformer) Transform(g *Graph) error {
+ nodes := make([]*graphNodeImportState, 0, len(t.Targets))
+ for _, target := range t.Targets {
+ addr, err := ParseResourceAddress(target.Addr)
+ if err != nil {
+ return fmt.Errorf(
+ "failed to parse resource address '%s': %s",
+ target.Addr, err)
+ }
+
+ nodes = append(nodes, &graphNodeImportState{
+ Addr: addr,
+ ID: target.ID,
+ Provider: target.Provider,
+ })
+ }
+
+ // Build the graph vertices
+ for _, n := range nodes {
+ g.Add(n)
+ }
+
+ return nil
+}
+
+type graphNodeImportState struct {
+ Addr *ResourceAddress // Addr is the resource address to import to
+ ID string // ID is the ID to import as
+ Provider string // Provider is the provider name to use
+
+ states []*InstanceState
+}
+
+func (n *graphNodeImportState) Name() string {
+ return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID)
+}
+
+func (n *graphNodeImportState) ProvidedBy() []string {
+ return []string{resourceProvider(n.Addr.Type, n.Provider)}
+}
+
+// GraphNodeSubPath impl.
+func (n *graphNodeImportState) Path() []string {
+ return normalizeModulePath(n.Addr.Path)
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeImportState) EvalTree() EvalNode {
+ var provider ResourceProvider
+ info := &InstanceInfo{
+ Id: fmt.Sprintf("%s.%s", n.Addr.Type, n.Addr.Name),
+ ModulePath: n.Path(),
+ Type: n.Addr.Type,
+ }
+
+ // Reset our states
+ n.states = nil
+
+ // Return our sequence
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: n.ProvidedBy()[0],
+ Output: &provider,
+ },
+ &EvalImportState{
+ Provider: &provider,
+ Info: info,
+ Id: n.ID,
+ Output: &n.states,
+ },
+ },
+ }
+}
+
+// GraphNodeDynamicExpandable impl.
+//
+// We use DynamicExpand as a way to generate the subgraph of refreshes
+// and state inserts we need to do for our import state. Since they're new
+// resources they don't depend on anything else and refreshes are isolated
+// so this is nearly a perfect use case for dynamic expand.
+func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
+ g := &Graph{Path: ctx.Path()}
+
+ // nameCounter is used to de-dup names in the state.
+ nameCounter := make(map[string]int)
+
+ // Compile the list of addresses that we'll be inserting into the state.
+ // We do this ahead of time so we can verify that we aren't importing
+ // something that already exists.
+ addrs := make([]*ResourceAddress, len(n.states))
+ for i, state := range n.states {
+ addr := *n.Addr
+ if t := state.Ephemeral.Type; t != "" {
+ addr.Type = t
+ }
+
+ // Determine if we need to suffix the name to de-dup
+ key := addr.String()
+ count, ok := nameCounter[key]
+ if ok {
+ count++
+ addr.Name += fmt.Sprintf("-%d", count)
+ }
+ nameCounter[key] = count
+
+ // Add it to our list
+ addrs[i] = &addr
+ }
+
+ // Verify that all the addresses are clear
+ state, lock := ctx.State()
+ lock.RLock()
+ defer lock.RUnlock()
+ filter := &StateFilter{State: state}
+ for _, addr := range addrs {
+ result, err := filter.Filter(addr.String())
+ if err != nil {
+ return nil, fmt.Errorf("Error verifying address %s: %s", addr, err)
+ }
+
+ // Go through the filter results; it is an error if we find
+ // a matching InstanceState, since that would mean a collision.
+ for _, r := range result {
+ if _, ok := r.Value.(*InstanceState); ok {
+ return nil, fmt.Errorf(
+ "Can't import %s, would collide with an existing resource.\n\n"+
+ "Please remove or rename this resource before continuing.",
+ addr)
+ }
+ }
+ }
+
+ // For each of the states, we add a node to handle the refresh/add to state.
+ // "n.states" is populated by our own EvalTree with the result of
+ // ImportState. Since DynamicExpand is always called after EvalTree, this
+ // is safe.
+ for i, state := range n.states {
+ g.Add(&graphNodeImportStateSub{
+ Target: addrs[i],
+ Path_: n.Path(),
+ State: state,
+ Provider: n.Provider,
+ })
+ }
+
+ // Root transform for a single root
+ t := &RootTransformer{}
+ if err := t.Transform(g); err != nil {
+ return nil, err
+ }
+
+ // Done!
+ return g, nil
+}
+
+// graphNodeImportStateSub is the sub-node of graphNodeImportState
+// and is part of the subgraph. This node is responsible for refreshing
+// and adding a resource to the state once it is imported.
+type graphNodeImportStateSub struct {
+ Target *ResourceAddress
+ State *InstanceState
+ Path_ []string
+ Provider string
+}
+
+func (n *graphNodeImportStateSub) Name() string {
+ return fmt.Sprintf("import %s result: %s", n.Target, n.State.ID)
+}
+
+func (n *graphNodeImportStateSub) Path() []string {
+ return n.Path_
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeImportStateSub) EvalTree() EvalNode {
+ // If the Ephemeral type isn't set, then it is an error
+ if n.State.Ephemeral.Type == "" {
+ err := fmt.Errorf(
+ "import of %s didn't set type for %s",
+ n.Target.String(), n.State.ID)
+ return &EvalReturnError{Error: &err}
+ }
+
+ // DeepCopy so we're only modifying our local copy
+ state := n.State.DeepCopy()
+
+ // Build the resource info
+ info := &InstanceInfo{
+ Id: fmt.Sprintf("%s.%s", n.Target.Type, n.Target.Name),
+ ModulePath: n.Path_,
+ Type: n.State.Ephemeral.Type,
+ }
+
+ // Key is the resource key
+ key := &ResourceStateKey{
+ Name: n.Target.Name,
+ Type: info.Type,
+ Index: n.Target.Index,
+ }
+
+ // The eval sequence
+ var provider ResourceProvider
+ return &EvalSequence{
+ Nodes: []EvalNode{
+ &EvalGetProvider{
+ Name: resourceProvider(info.Type, n.Provider),
+ Output: &provider,
+ },
+ &EvalRefresh{
+ Provider: &provider,
+ State: &state,
+ Info: info,
+ Output: &state,
+ },
+ &EvalImportStateVerify{
+ Info: info,
+ Id: n.State.ID,
+ State: &state,
+ },
+ &EvalWriteState{
+ Name: key.String(),
+ ResourceType: info.Type,
+ Provider: resourceProvider(info.Type, n.Provider),
+ State: &state,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
new file mode 100644
index 00000000..467950bd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
@@ -0,0 +1,120 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ModuleVariableTransformer is a GraphTransformer that adds all the variables
+// in the configuration to the graph.
+//
+// This only adds variables that are referenced by other things in the graph.
+// If a module variable is not referenced, it won't be added to the graph.
+type ModuleVariableTransformer struct {
+ Module *module.Tree
+
+ DisablePrune bool // True if pruning of unreferenced variables should be disabled
+}
+
+func (t *ModuleVariableTransformer) Transform(g *Graph) error {
+ return t.transform(g, nil, t.Module)
+}
+
+func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree) error {
+ // If no config, no variables
+ if m == nil {
+ return nil
+ }
+
+ // Transform all the children first. This must be done BEFORE the
+ // transformSingle call below, since child module variables can reference
+ // parent module variables, and those referencing nodes must already be
+ // in the graph when we prune unreferenced variables.
+ for _, c := range m.Children() {
+ if err := t.transform(g, m, c); err != nil {
+ return err
+ }
+ }
+
+ // If we have a parent, we can determine if a module variable is being
+ // used, so we transform this.
+ if parent != nil {
+ if err := t.transformSingle(g, parent, m); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, m *module.Tree) error {
+ // If we have no vars, we're done!
+ vars := m.Config().Variables
+ if len(vars) == 0 {
+ log.Printf("[TRACE] Module %#v has no variables, skipping.", m.Path())
+ return nil
+ }
+
+ // Look for usage of this module
+ var mod *config.Module
+ for _, modUse := range parent.Config().Modules {
+ if modUse.Name == m.Name() {
+ mod = modUse
+ break
+ }
+ }
+ if mod == nil {
+ log.Printf("[INFO] Module %#v not used, not adding variables", m.Path())
+ return nil
+ }
+
+ // Build the reference map so we can determine if we're referencing things.
+ refMap := NewReferenceMap(g.Vertices())
+
+ // Add all variables here
+ for _, v := range vars {
+ // Determine the value of the variable. If it isn't in the
+ // configuration then it was never set and that's not a problem.
+ var value *config.RawConfig
+ if raw, ok := mod.RawConfig.Raw[v.Name]; ok {
+ var err error
+ value, err = config.NewRawConfig(map[string]interface{}{
+ v.Name: raw,
+ })
+ if err != nil {
+ // This shouldn't happen because the value is already in
+ // a RawConfig above, meaning it parsed successfully once before.
+ panic(err)
+ }
+ }
+
+ // Build the node.
+ //
+ // NOTE: For now this is just an "applyable" variable. As we build
+ // new graph builders for the other operations I suspect we'll
+ // find a way to parameterize this, require new transforms, etc.
+ node := &NodeApplyableModuleVariable{
+ PathValue: normalizeModulePath(m.Path()),
+ Config: v,
+ Value: value,
+ Module: t.Module,
+ }
+
+ if !t.DisablePrune {
+ // If the node is not referenced by anything, then we don't need
+ // to include it since it won't be used.
+ if matches := refMap.ReferencedBy(node); len(matches) == 0 {
+ log.Printf(
+ "[INFO] Not including %q in graph, nothing depends on it",
+ dag.VertexName(node))
+ continue
+ }
+ }
+
+ // Add it!
+ g.Add(node)
+ }
+
+ return nil
+}
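+
+// For illustration (hypothetical module usage): given
+//
+//     module "net" {
+//         source = "./net"
+//         cidr   = "10.0.0.0/16"
+//     }
+//
+// the variable "cidr" gets a value RawConfig wrapping
+// map[string]interface{}{"cidr": "10.0.0.0/16"}, built exactly as above.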
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
new file mode 100644
index 00000000..b256a25b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
@@ -0,0 +1,110 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// OrphanResourceCountTransformer is a GraphTransformer that adds orphans
+// for an expanded count to the graph. The determination of this depends
+// on the count argument given.
+//
+// Orphans are found by comparing the count to what is found in the state.
+// This transform assumes that if an element in the state is within the count
+// bounds given, that it is not an orphan.
+type OrphanResourceCountTransformer struct {
+ Concrete ConcreteResourceNodeFunc
+
+ Count int // Actual count of the resource
+ Addr *ResourceAddress // Addr of the resource to look for orphans
+ State *State // Full global state
+}
+
+func (t *OrphanResourceCountTransformer) Transform(g *Graph) error {
+ log.Printf("[TRACE] OrphanResourceCount: Starting...")
+
+ // Grab the module in the state just for this resource address
+ ms := t.State.ModuleByPath(normalizeModulePath(t.Addr.Path))
+ if ms == nil {
+ // If no state, there can't be orphans
+ return nil
+ }
+
+ orphanIndex := -1
+ if t.Count == 1 {
+ orphanIndex = 0
+ }
+
+ // Go through the resources in the state and add any orphans to the graph
+ for key := range ms.Resources {
+ // Build the address
+ addr, err := parseResourceAddressInternal(key)
+ if err != nil {
+ return err
+ }
+ addr.Path = ms.Path[1:]
+
+ // Copy the address for comparison. If we aren't looking at
+ // the same resource, then just ignore it.
+ addrCopy := addr.Copy()
+ addrCopy.Index = -1
+ if !addrCopy.Equals(t.Addr) {
+ continue
+ }
+
+ log.Printf("[TRACE] OrphanResourceCount: Checking: %s", addr)
+
+ idx := addr.Index
+
+ // If the count is zero (or negative) and the index here is -1 or 0,
+ // then we bump the index past the count so that we treat it as
+ // an orphan.
+ if t.Count <= 0 && idx <= 0 {
+ idx = t.Count + 1
+ }
+
+ // If we have a count greater than 0 and we're at the zero index,
+ // we do a special case check to see if our state also has a
+ // -1 index value. If so, this is an orphan because our rules are
+ // that if both a -1 and 0 are in the state, the 0 is destroyed.
+ if t.Count > 0 && idx == orphanIndex {
+ // This is a piece of cleverness (beware), but it's simple:
+ // if orphanIndex is 0, then check -1, else check 0.
+ checkIndex := (orphanIndex + 1) * -1
+
+ key := &ResourceStateKey{
+ Name: addr.Name,
+ Type: addr.Type,
+ Mode: addr.Mode,
+ Index: checkIndex,
+ }
+
+ if _, ok := ms.Resources[key.String()]; ok {
+ // We have a -1 index, too. Make an arbitrarily high
+ // index so that we always mark this as an orphan.
+ log.Printf(
+ "[WARN] OrphanResourceCount: %q both -1 and 0 index found, orphaning %d",
+ addr, orphanIndex)
+ idx = t.Count + 1
+ }
+ }
+
+ // If the index is within the count bounds, it is not an orphan
+ if idx < t.Count {
+ continue
+ }
+
+ // Build the abstract node and the concrete one
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ // Add it to the graph
+ g.Add(node)
+ }
+
+ return nil
+}
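+
+// Worked example (hypothetical state): with Count = 2 and state entries
+// foo.0, foo.1, and foo.2, only foo.2 satisfies idx >= Count and becomes
+// an orphan. With Count = 1 and both foo (index -1) and foo.0 in the
+// state, foo.0 is orphaned per the -1/0 rule above.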
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
new file mode 100644
index 00000000..49568d5b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
@@ -0,0 +1,64 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// OrphanOutputTransformer finds the outputs that are present in the
+// state but not in the given config, and adds them to the graph
+// for deletion.
+type OrphanOutputTransformer struct {
+ Module *module.Tree // Root module
+ State *State // State is the root state
+}
+
+func (t *OrphanOutputTransformer) Transform(g *Graph) error {
+ if t.State == nil {
+ log.Printf("[DEBUG] No state, no orphan outputs")
+ return nil
+ }
+
+ return t.transform(g, t.Module)
+}
+
+func (t *OrphanOutputTransformer) transform(g *Graph, m *module.Tree) error {
+ // If there is no module, there is nothing to do; this also guards the
+ // m.Path() call below against a nil tree.
+ if m == nil {
+ return nil
+ }
+
+ // Get our configuration, and recurse into children
+ var c *config.Config
+ c = m.Config()
+ for _, child := range m.Children() {
+ if err := t.transform(g, child); err != nil {
+ return err
+ }
+ }
+
+ // Get the state. If there is no state, then we have no orphans!
+ path := normalizeModulePath(m.Path())
+ state := t.State.ModuleByPath(path)
+ if state == nil {
+ return nil
+ }
+
+ // Make a map of the valid outputs
+ valid := make(map[string]struct{})
+ for _, o := range c.Outputs {
+ valid[o.Name] = struct{}{}
+ }
+
+ // Go through the outputs and find the ones that aren't in our config.
+ for n := range state.Outputs {
+ // If it is in the valid map, then ignore
+ if _, ok := valid[n]; ok {
+ continue
+ }
+
+ // Orphan!
+ g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path})
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
new file mode 100644
index 00000000..e42d3c84
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
@@ -0,0 +1,78 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// OrphanResourceTransformer is a GraphTransformer that adds resource
+// orphans to the graph. A resource orphan is a resource that is
+// represented in the state but not in the configuration.
+//
+// This only adds orphans that have no representation at all in the
+// configuration.
+type OrphanResourceTransformer struct {
+ Concrete ConcreteResourceNodeFunc
+
+ // State is the global state. We require the global state to
+ // properly find module orphans at our path.
+ State *State
+
+ // Module is the root module. We'll look up the proper configuration
+ // using the graph path.
+ Module *module.Tree
+}
+
+func (t *OrphanResourceTransformer) Transform(g *Graph) error {
+ if t.State == nil {
+ // If the entire state is nil, there can't be any orphans
+ return nil
+ }
+
+ // Go through the modules and for each module transform in order
+ // to add the orphan.
+ for _, ms := range t.State.Modules {
+ if err := t.transform(g, ms); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (t *OrphanResourceTransformer) transform(g *Graph, ms *ModuleState) error {
+ if ms == nil {
+ return nil
+ }
+
+ // Get the configuration for this path. The configuration might be
+ // nil if the module was removed from the configuration. This is okay,
+ // this just means that every resource is an orphan.
+ var c *config.Config
+ if m := t.Module.Child(ms.Path[1:]); m != nil {
+ c = m.Config()
+ }
+
+ // Go through the orphans and add them all to the graph
+ for _, key := range ms.Orphans(c) {
+ // Build the abstract resource
+ addr, err := parseResourceAddressInternal(key)
+ if err != nil {
+ return err
+ }
+ addr.Path = ms.Path[1:]
+
+ // Build the abstract node and the concrete one
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ // Add it to the graph
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
new file mode 100644
index 00000000..b260f4ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
@@ -0,0 +1,59 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// OutputTransformer is a GraphTransformer that adds all the outputs
+// in the configuration to the graph.
+//
+// This is done for the apply graph builder even if dependent nodes
+// aren't changing, since there is no downside: the state will be
+// available regardless.
+type OutputTransformer struct {
+ Module *module.Tree
+}
+
+func (t *OutputTransformer) Transform(g *Graph) error {
+ return t.transform(g, t.Module)
+}
+
+func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error {
+ // If no config, no outputs
+ if m == nil {
+ return nil
+ }
+
+ // Transform all the children. We must do this first because
+ // we can reference module outputs and they must show up in the
+ // reference map.
+ for _, c := range m.Children() {
+ if err := t.transform(g, c); err != nil {
+ return err
+ }
+ }
+
+ // If we have no outputs, we're done!
+ os := m.Config().Outputs
+ if len(os) == 0 {
+ return nil
+ }
+
+ // Add all outputs here
+ for _, o := range os {
+ // Build the node.
+ //
+ // NOTE: For now this is just an "applyable" output. As we build
+ // new graph builders for the other operations I suspect we'll
+ // find a way to parameterize this, require new transforms, etc.
+ node := &NodeApplyableOutput{
+ PathValue: normalizeModulePath(m.Path()),
+ Config: o,
+ }
+
+ // Add it!
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
new file mode 100644
index 00000000..b9695d52
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
@@ -0,0 +1,380 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeProvider is an interface that nodes that can be a provider
+// must implement. The ProviderName returned is the name of the provider
+// they satisfy.
+type GraphNodeProvider interface {
+ ProviderName() string
+}
+
+// GraphNodeCloseProvider is an interface that nodes that can be a close
+// provider must implement. The CloseProviderName returned is the name of
+// the provider they satisfy.
+type GraphNodeCloseProvider interface {
+ CloseProviderName() string
+}
+
+// GraphNodeProviderConsumer is an interface that nodes that require
+// a provider must implement. ProvidedBy must return the name of the provider
+// to use.
+type GraphNodeProviderConsumer interface {
+ ProvidedBy() []string
+}
+
+// ProviderTransformer is a GraphTransformer that maps resources to
+// providers within the graph. This will error if there are any resources
+// that don't map to proper providers.
+type ProviderTransformer struct{}
+
+func (t *ProviderTransformer) Transform(g *Graph) error {
+ // Go through the other nodes and match them to providers they need
+ var err error
+ m := providerVertexMap(g)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProviderConsumer); ok {
+ for _, p := range pv.ProvidedBy() {
+ target := m[providerMapKey(p, pv)]
+ if target == nil {
+ log.Printf("[TRACE] ProviderTransformer: provider map: %#v, missing key: %#v", m, providerMapKey(p, pv))
+ err = multierror.Append(err, fmt.Errorf(
+ "%s: provider %s couldn't be found",
+ dag.VertexName(v), p))
+ continue
+ }
+
+ g.Connect(dag.BasicEdge(v, target))
+ }
+ }
+ }
+
+ return err
+}
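+
+// For illustration (hypothetical names): a resource node whose
+// ProvidedBy() returns []string{"aws.west"} is connected to the matching
+// provider vertex found via the map built above; if no such vertex
+// exists, the error above is accumulated and reported.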
+
+// CloseProviderTransformer is a GraphTransformer that adds nodes to the
+// graph that will close open provider connections that aren't needed anymore.
+// A provider connection is no longer needed once all dependent resources
+// in the graph are evaluated.
+type CloseProviderTransformer struct{}
+
+func (t *CloseProviderTransformer) Transform(g *Graph) error {
+ pm := providerVertexMap(g)
+ cpm := closeProviderVertexMap(g)
+ var err error
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProviderConsumer); ok {
+ for _, p := range pv.ProvidedBy() {
+ key := p
+ source := cpm[key]
+
+ if source == nil {
+ // Create a new graphNodeCloseProvider and add it to the graph
+ source = &graphNodeCloseProvider{ProviderNameValue: p}
+ g.Add(source)
+
+ // Close node needs to depend on provider
+ provider, ok := pm[key]
+ if !ok {
+ err = multierror.Append(err, fmt.Errorf(
+ "%s: provider %s couldn't be found for closing",
+ dag.VertexName(v), p))
+ continue
+ }
+ g.Connect(dag.BasicEdge(source, provider))
+
+ // Make sure we also add the new graphNodeCloseProvider to the map
+ // so we don't create and add any duplicate graphNodeCloseProviders.
+ cpm[key] = source
+ }
+
+ // Close node depends on all nodes provided by the provider
+ g.Connect(dag.BasicEdge(source, v))
+ }
+ }
+ }
+
+ return err
+}
+
+// MissingProviderTransformer is a GraphTransformer that adds nodes
+// for missing providers into the graph. Specifically, it creates provider
+// configuration nodes for all the providers that we support. These are
+// pruned later during an optimization pass.
+type MissingProviderTransformer struct {
+ // Providers is the list of providers we support.
+ Providers []string
+
+ // AllowAny will not check that a provider is supported before adding
+ // it to the graph.
+ AllowAny bool
+
+ // Concrete, if set, overrides how the providers are made.
+ Concrete ConcreteProviderNodeFunc
+}
+
+func (t *MissingProviderTransformer) Transform(g *Graph) error {
+ // Initialize factory
+ if t.Concrete == nil {
+ t.Concrete = func(a *NodeAbstractProvider) dag.Vertex {
+ return a
+ }
+ }
+
+ // Create a set of our supported providers
+ supported := make(map[string]struct{}, len(t.Providers))
+ for _, v := range t.Providers {
+ supported[v] = struct{}{}
+ }
+
+ // Get the map of providers we already have in our graph
+ m := providerVertexMap(g)
+
+ // Go through all the provider consumers and make sure we add
+ // that provider if it is missing. We use an indexed for loop here
+ // instead of "range" since we may append to "check" as we go.
+ check := g.Vertices()
+ for i := 0; i < len(check); i++ {
+ v := check[i]
+
+ pv, ok := v.(GraphNodeProviderConsumer)
+ if !ok {
+ continue
+ }
+
+ // If this node has a subpath, then we use that as a prefix
+ // into our map to check for an existing provider.
+ var path []string
+ if sp, ok := pv.(GraphNodeSubPath); ok {
+ raw := normalizeModulePath(sp.Path())
+ if len(raw) > len(rootModulePath) {
+ path = raw
+ }
+ }
+
+ for _, p := range pv.ProvidedBy() {
+ key := providerMapKey(p, pv)
+ if _, ok := m[key]; ok {
+ // This provider already exists as a configure node
+ continue
+ }
+
+ // If the provider has an alias in it, we just want the type
+ ptype := p
+ if idx := strings.IndexRune(p, '.'); idx != -1 {
+ ptype = p[:idx]
+ }
+
+ if !t.AllowAny {
+ if _, ok := supported[ptype]; !ok {
+ // If we don't support the provider type, skip it.
+ // Validation later will catch this as an error.
+ continue
+ }
+ }
+
+ // Add the missing provider node to the graph
+ v := t.Concrete(&NodeAbstractProvider{
+ NameValue: p,
+ PathValue: path,
+ }).(dag.Vertex)
+ if len(path) > 0 {
+ // We'll need the parent provider as well, so let's
+ // add a dummy node to check to make sure that we add
+ // that parent provider.
+ check = append(check, &graphNodeProviderConsumerDummy{
+ ProviderValue: p,
+ PathValue: path[:len(path)-1],
+ })
+ }
+
+ m[key] = g.Add(v)
+ }
+ }
+
+ return nil
+}
+
+// ParentProviderTransformer connects provider nodes to their parents.
+//
+// This works by finding nodes that are both GraphNodeProviders and
+// GraphNodeSubPath. It then connects the providers to their parent
+// path.
+type ParentProviderTransformer struct{}
+
+func (t *ParentProviderTransformer) Transform(g *Graph) error {
+ // Make a mapping of path to dag.Vertex, where path is: "path.name"
+ m := make(map[string]dag.Vertex)
+
+ // Also create a map that maps a provider to its parent
+ parentMap := make(map[dag.Vertex]string)
+ for _, raw := range g.Vertices() {
+ // If it is the flat version, then make it the non-flat version.
+ // We eventually want to get rid of the flat version entirely so
+ // this is a stop-gap while it still exists.
+ var v dag.Vertex = raw
+
+ // Only care about providers
+ pn, ok := v.(GraphNodeProvider)
+ if !ok || pn.ProviderName() == "" {
+ continue
+ }
+
+ // Also require a subpath, if there is no subpath then we
+ // just totally ignore it. The expectation of this transform is
+ // that it is used with a graph builder that is already flattened.
+ var path []string
+ if pn, ok := raw.(GraphNodeSubPath); ok {
+ path = pn.Path()
+ }
+ path = normalizeModulePath(path)
+
+ // Build the key with path.name i.e. "child.subchild.aws"
+ key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
+ m[key] = raw
+
+ // Determine the parent if we're non-root. We check len(path) > 1
+ // since index 0 is always "root" after the normalization above.
+ if len(path) > 1 {
+ path = path[:len(path)-1]
+ key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
+ parentMap[raw] = key
+ }
+ }
+
+ // Connect!
+ for v, key := range parentMap {
+ if parent, ok := m[key]; ok {
+ g.Connect(dag.BasicEdge(v, parent))
+ }
+ }
+
+ return nil
+}
+
+// PruneProviderTransformer is a GraphTransformer that prunes all the
+// providers that aren't needed from the graph. A provider is unneeded if
+// no resource or module is using that provider.
+type PruneProviderTransformer struct{}
+
+func (t *PruneProviderTransformer) Transform(g *Graph) error {
+ for _, v := range g.Vertices() {
+ // We only care about the providers
+ if pn, ok := v.(GraphNodeProvider); !ok || pn.ProviderName() == "" {
+ continue
+ }
+ // Does anything depend on this? If not, then prune it.
+ if s := g.UpEdges(v); s.Len() == 0 {
+ if nv, ok := v.(dag.NamedVertex); ok {
+ log.Printf("[DEBUG] Pruning provider with no dependencies: %s", nv.Name())
+ }
+ g.Remove(v)
+ }
+ }
+
+ return nil
+}
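+
+// What follows is a minimal, illustrative sketch of how these transformers
+// might be chained; it assumes a *Graph g in scope and the package's
+// GraphTransformer interface, and the ordering shown is not prescriptive:
+//
+//	steps := []GraphTransformer{
+//		&MissingProviderTransformer{Providers: []string{"aws"}},
+//		&ProviderTransformer{},
+//		&CloseProviderTransformer{},
+//		&PruneProviderTransformer{},
+//	}
+//	for _, step := range steps {
+//		if err := step.Transform(g); err != nil {
+//			return err
+//		}
+//	}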
+
+// providerMapKey is a helper that gives us the key to use for the
+// maps returned by things such as providerVertexMap.
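+//
+// For example (illustrative): a root-level "aws" provider keys as "aws",
+// while the same provider under the module path ["root", "child"] keys as
+// "module.child.aws".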
+func providerMapKey(k string, v dag.Vertex) string {
+ pathPrefix := ""
+ if sp, ok := v.(GraphNodeSubPath); ok {
+ raw := normalizeModulePath(sp.Path())
+ if len(raw) > len(rootModulePath) {
+ pathPrefix = modulePrefixStr(raw) + "."
+ }
+ }
+
+ return pathPrefix + k
+}
+
+func providerVertexMap(g *Graph) map[string]dag.Vertex {
+ m := make(map[string]dag.Vertex)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProvider); ok {
+ key := providerMapKey(pv.ProviderName(), v)
+ m[key] = v
+ }
+ }
+
+ return m
+}
+
+func closeProviderVertexMap(g *Graph) map[string]dag.Vertex {
+ m := make(map[string]dag.Vertex)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeCloseProvider); ok {
+ m[pv.CloseProviderName()] = v
+ }
+ }
+
+ return m
+}
+
+type graphNodeCloseProvider struct {
+ ProviderNameValue string
+}
+
+func (n *graphNodeCloseProvider) Name() string {
+ return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue)
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeCloseProvider) EvalTree() EvalNode {
+ return CloseProviderEvalTree(n.ProviderNameValue)
+}
+
+// GraphNodeDependable impl.
+func (n *graphNodeCloseProvider) DependableName() []string {
+ return []string{n.Name()}
+}
+
+func (n *graphNodeCloseProvider) CloseProviderName() string {
+ return n.ProviderNameValue
+}
+
+// GraphNodeDotter impl.
+func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+ if !opts.Verbose {
+ return nil
+ }
+ return &dag.DotNode{
+ Name: name,
+ Attrs: map[string]string{
+ "label": n.Name(),
+ "shape": "diamond",
+ },
+ }
+}
+
+// RemovableIfNotTargeted
+func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool {
+ // We need to add this so that this node will be removed if
+ // it isn't targeted or a dependency of a target.
+ return true
+}
+
+// graphNodeProviderConsumerDummy is a struct that never enters the real
+// graph (though it could, to no ill effect). It implements
+// GraphNodeProviderConsumer and GraphNodeSubPath as a way to force
+// certain transformations.
+type graphNodeProviderConsumerDummy struct {
+ ProviderValue string
+ PathValue []string
+}
+
+func (n *graphNodeProviderConsumerDummy) Path() []string {
+ return n.PathValue
+}
+
+func (n *graphNodeProviderConsumerDummy) ProvidedBy() []string {
+ return []string{n.ProviderValue}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go
new file mode 100644
index 00000000..d9919f3a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go
@@ -0,0 +1,50 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// DisableProviderTransformer "disables" any providers that are not actually
+// used by anything. This avoids the provider being initialized and configured.
+// This both saves resources but also avoids errors since configuration
+// may imply initialization which may require auth.
+type DisableProviderTransformer struct{}
+
+func (t *DisableProviderTransformer) Transform(g *Graph) error {
+ for _, v := range g.Vertices() {
+ // We only care about providers
+ pn, ok := v.(GraphNodeProvider)
+ if !ok || pn.ProviderName() == "" {
+ continue
+ }
+
+ // If we have dependencies, then don't disable
+ if g.UpEdges(v).Len() > 0 {
+ continue
+ }
+
+ // Get the path
+ var path []string
+ if pn, ok := v.(GraphNodeSubPath); ok {
+ path = pn.Path()
+ }
+
+ // Disable the provider by replacing it with a "disabled" provider
+ disabled := &NodeDisabledProvider{
+ NodeAbstractProvider: &NodeAbstractProvider{
+ NameValue: pn.ProviderName(),
+ PathValue: path,
+ },
+ }
+
+ if !g.Replace(v, disabled) {
+ panic(fmt.Sprintf(
+ "vertex disappeared from under us: %s",
+ dag.VertexName(v)))
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
new file mode 100644
index 00000000..f49d8241
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
@@ -0,0 +1,206 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeProvisioner is an interface that must be implemented by nodes
+// that can act as a provisioner. The ProvisionerName returned is the name
+// of the provisioner they satisfy.
+type GraphNodeProvisioner interface {
+ ProvisionerName() string
+}
+
+// GraphNodeCloseProvisioner is an interface that must be implemented by
+// nodes that close a provisioner connection. The CloseProvisionerName
+// returned is the name of the provisioner they satisfy.
+type GraphNodeCloseProvisioner interface {
+ CloseProvisionerName() string
+}
+
+// GraphNodeProvisionerConsumer is an interface that must be implemented by
+// nodes that require a provisioner. ProvisionedBy must return the name of
+// the provisioner to use.
+type GraphNodeProvisionerConsumer interface {
+ ProvisionedBy() []string
+}
+
+// ProvisionerTransformer is a GraphTransformer that maps resources to
+// provisioners within the graph. This will error if there are any resources
+// that don't map to a proper provisioner.
+type ProvisionerTransformer struct{}
+
+func (t *ProvisionerTransformer) Transform(g *Graph) error {
+ // Go through the other nodes and match them to provisioners they need
+ var err error
+ m := provisionerVertexMap(g)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProvisionerConsumer); ok {
+ for _, p := range pv.ProvisionedBy() {
+ key := provisionerMapKey(p, pv)
+ if m[key] == nil {
+ err = multierror.Append(err, fmt.Errorf(
+ "%s: provisioner %s couldn't be found",
+ dag.VertexName(v), p))
+ continue
+ }
+
+ g.Connect(dag.BasicEdge(v, m[key]))
+ }
+ }
+ }
+
+ return err
+}
+
+// MissingProvisionerTransformer is a GraphTransformer that adds nodes
+// for missing provisioners into the graph.
+type MissingProvisionerTransformer struct {
+ // Provisioners is the list of provisioners we support.
+ Provisioners []string
+}
+
+func (t *MissingProvisionerTransformer) Transform(g *Graph) error {
+ // Create a set of our supported provisioners
+ supported := make(map[string]struct{}, len(t.Provisioners))
+ for _, v := range t.Provisioners {
+ supported[v] = struct{}{}
+ }
+
+ // Get the map of provisioners we already have in our graph
+ m := provisionerVertexMap(g)
+
+ // Go through all the provisioner consumers and make sure we add
+ // that provisioner if it is missing.
+ for _, v := range g.Vertices() {
+ pv, ok := v.(GraphNodeProvisionerConsumer)
+ if !ok {
+ continue
+ }
+
+ // If this node has a subpath, then we use that as a prefix
+ // into our map to check for an existing provisioner.
+ var path []string
+ if sp, ok := pv.(GraphNodeSubPath); ok {
+ raw := normalizeModulePath(sp.Path())
+ if len(raw) > len(rootModulePath) {
+ path = raw
+ }
+ }
+
+ for _, p := range pv.ProvisionedBy() {
+ // Build the key for storing in the map
+ key := provisionerMapKey(p, pv)
+
+ if _, ok := m[key]; ok {
+ // This provisioner already exists as a configure node
+ continue
+ }
+
+ if _, ok := supported[p]; !ok {
+ // If we don't support the provisioner type, skip it.
+ // Validation later will catch this as an error.
+ continue
+ }
+
+ // Build the vertex
+ var newV dag.Vertex = &NodeProvisioner{
+ NameValue: p,
+ PathValue: path,
+ }
+
+ // Add the missing provisioner node to the graph
+ m[key] = g.Add(newV)
+ }
+ }
+
+ return nil
+}
+
+// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the
+// graph that will close open provisioner connections that aren't needed
+// anymore. A provisioner connection is no longer needed once all of the
+// resources that depend on it have been evaluated.
+type CloseProvisionerTransformer struct{}
+
+func (t *CloseProvisionerTransformer) Transform(g *Graph) error {
+ m := closeProvisionerVertexMap(g)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProvisionerConsumer); ok {
+ for _, p := range pv.ProvisionedBy() {
+ source := m[p]
+
+ if source == nil {
+ // Create a new graphNodeCloseProvisioner and add it to the graph
+ source = &graphNodeCloseProvisioner{ProvisionerNameValue: p}
+ g.Add(source)
+
+ // Make sure we also add the new graphNodeCloseProvisioner to the map
+ // so we don't create and add any duplicate graphNodeCloseProvisioners.
+ m[p] = source
+ }
+
+ g.Connect(dag.BasicEdge(source, v))
+ }
+ }
+ }
+
+ return nil
+}
+
+// provisionerMapKey is a helper that gives us the key to use for the
+// maps returned by things such as provisionerVertexMap.
+func provisionerMapKey(k string, v dag.Vertex) string {
+ pathPrefix := ""
+ if sp, ok := v.(GraphNodeSubPath); ok {
+ raw := normalizeModulePath(sp.Path())
+ if len(raw) > len(rootModulePath) {
+ pathPrefix = modulePrefixStr(raw) + "."
+ }
+ }
+
+ return pathPrefix + k
+}
+
+func provisionerVertexMap(g *Graph) map[string]dag.Vertex {
+ m := make(map[string]dag.Vertex)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeProvisioner); ok {
+ key := provisionerMapKey(pv.ProvisionerName(), v)
+ m[key] = v
+ }
+ }
+
+ return m
+}
+
+func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex {
+ m := make(map[string]dag.Vertex)
+ for _, v := range g.Vertices() {
+ if pv, ok := v.(GraphNodeCloseProvisioner); ok {
+ m[pv.CloseProvisionerName()] = v
+ }
+ }
+
+ return m
+}
+
+type graphNodeCloseProvisioner struct {
+ ProvisionerNameValue string
+}
+
+func (n *graphNodeCloseProvisioner) Name() string {
+ return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue)
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeCloseProvisioner) EvalTree() EvalNode {
+ return &EvalCloseProvisioner{Name: n.ProvisionerNameValue}
+}
+
+func (n *graphNodeCloseProvisioner) CloseProvisionerName() string {
+ return n.ProvisionerNameValue
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
new file mode 100644
index 00000000..c5452354
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
@@ -0,0 +1,321 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeReferenceable must be implemented by any node that represents
+// a Terraform thing that can be referenced (resource, module, etc.).
+//
+// Even if the thing has no name, this should return an empty list. By
+// implementing this and returning a non-nil result, you say that this CAN
+// be referenced and other methods of referencing may still be possible (such
+// as by path!)
+type GraphNodeReferenceable interface {
+ // ReferenceableName is the name by which this can be referenced.
+ // This can be either just the type, or include the field. Example:
+ // "aws_instance.bar" or "aws_instance.bar.id".
+ ReferenceableName() []string
+}
+
+// GraphNodeReferencer must be implemented by nodes that reference other
+// Terraform items and therefore depend on them.
+type GraphNodeReferencer interface {
+ // References are the list of things that this node references. This
+ // can include fields or just the type, just like GraphNodeReferenceable
+ // above.
+ References() []string
+}
+
+// GraphNodeReferenceGlobal is an interface that can optionally be
+// implemented. If ReferenceGlobal returns true, then the References()
+// and ReferenceableName() must be _fully qualified_ with "module.foo.bar"
+// etc.
+//
+// This allows a node to reference and be referenced by a specific name
+// that may cross module boundaries. This can be very dangerous so use
+// this wisely.
+//
+// The primary use case for this is module boundaries (variables coming in).
+type GraphNodeReferenceGlobal interface {
+ // ReferenceGlobal returns true to signal that references and names
+ // are fully qualified. See the above docs for more information.
+ ReferenceGlobal() bool
+}
+
+// ReferenceTransformer is a GraphTransformer that connects all the
+// nodes that reference each other in order to form the proper ordering.
+type ReferenceTransformer struct{}
+
+func (t *ReferenceTransformer) Transform(g *Graph) error {
+ // Build a reference map so we can efficiently look up the references
+ vs := g.Vertices()
+ m := NewReferenceMap(vs)
+
+ // Find the things that reference things and connect them
+ for _, v := range vs {
+ parents, _ := m.References(v)
+ parentsDbg := make([]string, len(parents))
+ for i, v := range parents {
+ parentsDbg[i] = dag.VertexName(v)
+ }
+ log.Printf(
+ "[DEBUG] ReferenceTransformer: %q references: %v",
+ dag.VertexName(v), parentsDbg)
+
+ for _, parent := range parents {
+ g.Connect(dag.BasicEdge(v, parent))
+ }
+ }
+
+ return nil
+}
+
+// ReferenceMap is a structure that can be used to efficiently check
+// for references on a graph.
+type ReferenceMap struct {
+ // references maps a referenceable name to the list of vertices that
+ // implement that name. referencedBy is the inverse, mapping a name to
+ // the vertices that reference it. Both are built on initialization.
+ references map[string][]dag.Vertex
+ referencedBy map[string][]dag.Vertex
+}
+
+// References returns the list of vertices that this vertex
+// references along with any missing references.
+func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) {
+ rn, ok := v.(GraphNodeReferencer)
+ if !ok {
+ return nil, nil
+ }
+
+ var matches []dag.Vertex
+ var missing []string
+ prefix := m.prefix(v)
+ for _, ns := range rn.References() {
+ found := false
+ for _, n := range strings.Split(ns, "/") {
+ n = prefix + n
+ parents, ok := m.references[n]
+ if !ok {
+ continue
+ }
+
+ // Mark that we found a match
+ found = true
+
+ // Make sure this isn't a self reference, which isn't included
+ selfRef := false
+ for _, p := range parents {
+ if p == v {
+ selfRef = true
+ break
+ }
+ }
+ if selfRef {
+ continue
+ }
+
+ matches = append(matches, parents...)
+ break
+ }
+
+ if !found {
+ missing = append(missing, ns)
+ }
+ }
+
+ return matches, missing
+}
+
+// ReferencedBy returns the list of vertices that reference the
+// vertex passed in.
+func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex {
+ rn, ok := v.(GraphNodeReferenceable)
+ if !ok {
+ return nil
+ }
+
+ var matches []dag.Vertex
+ prefix := m.prefix(v)
+ for _, n := range rn.ReferenceableName() {
+ n = prefix + n
+ children, ok := m.referencedBy[n]
+ if !ok {
+ continue
+ }
+
+ // Make sure this isn't a self reference, which isn't included
+ selfRef := false
+ for _, p := range children {
+ if p == v {
+ selfRef = true
+ break
+ }
+ }
+ if selfRef {
+ continue
+ }
+
+ matches = append(matches, children...)
+ }
+
+ return matches
+}
+
+func (m *ReferenceMap) prefix(v dag.Vertex) string {
+ // If the node is stating it is already fully qualified then
+ // we don't have to create the prefix!
+ if gn, ok := v.(GraphNodeReferenceGlobal); ok && gn.ReferenceGlobal() {
+ return ""
+ }
+
+ // Create the prefix based on the path
+ var prefix string
+ if pn, ok := v.(GraphNodeSubPath); ok {
+ if path := normalizeModulePath(pn.Path()); len(path) > 1 {
+ prefix = modulePrefixStr(path) + "."
+ }
+ }
+
+ return prefix
+}
+
+// NewReferenceMap is used to create a new reference map for the
+// given set of vertices.
+func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
+ var m ReferenceMap
+
+ // Build the lookup table
+ refMap := make(map[string][]dag.Vertex)
+ for _, v := range vs {
+ // We're only looking for referenceable nodes
+ rn, ok := v.(GraphNodeReferenceable)
+ if !ok {
+ continue
+ }
+
+ // Go through and cache them
+ prefix := m.prefix(v)
+ for _, n := range rn.ReferenceableName() {
+ n = prefix + n
+ refMap[n] = append(refMap[n], v)
+ }
+
+ // If there is a path, it is always referenceable by that. For
+ // example, if this is a referenceable thing at path []string{"foo"},
+ // then it can be referenced at "module.foo"
+ if pn, ok := v.(GraphNodeSubPath); ok {
+ for _, p := range ReferenceModulePath(pn.Path()) {
+ refMap[p] = append(refMap[p], v)
+ }
+ }
+ }
+
+ // Build the lookup table for referenced by
+ refByMap := make(map[string][]dag.Vertex)
+ for _, v := range vs {
+ // We're only looking for referenceable nodes
+ rn, ok := v.(GraphNodeReferencer)
+ if !ok {
+ continue
+ }
+
+ // Go through and cache them
+ prefix := m.prefix(v)
+ for _, n := range rn.References() {
+ n = prefix + n
+ refByMap[n] = append(refByMap[n], v)
+ }
+ }
+
+ m.references = refMap
+ m.referencedBy = refByMap
+ return &m
+}
+
+// ReferenceModulePath returns the reference name for a module path. The
+// path "foo" would return "module.foo". If this is a deeply nested module,
+// it will include every parent as well. For example: ["foo", "bar"] would
+// return both "module.foo" and "module.foo.module.bar"
+func ReferenceModulePath(p []string) []string {
+ p = normalizeModulePath(p)
+ if len(p) == 1 {
+ // Root, no name
+ return nil
+ }
+
+ result := make([]string, 0, len(p)-1)
+ for i := len(p); i > 1; i-- {
+ result = append(result, modulePrefixStr(p[:i]))
+ }
+
+ return result
+}
+
+// ReferencesFromConfig returns the references that a configuration has
+// based on the interpolated variables in a configuration.
+func ReferencesFromConfig(c *config.RawConfig) []string {
+ var result []string
+ for _, v := range c.Variables {
+ if r := ReferenceFromInterpolatedVar(v); len(r) > 0 {
+ result = append(result, r...)
+ }
+ }
+
+ return result
+}
+
+// ReferenceFromInterpolatedVar returns the reference from this variable,
+// or nil if there is no reference.
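+//
+// Some illustrative results: a module variable yields
+// "module.<name>.output.<field>"; a splat resource variable such as
+// aws_instance.web.*.id yields "aws_instance.web.*"; an indexed one yields
+// "aws_instance.web.0/aws_instance.web.N"; and a user variable yields
+// "var.<name>".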
+func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string {
+ switch v := v.(type) {
+ case *config.ModuleVariable:
+ return []string{fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)}
+ case *config.ResourceVariable:
+ id := v.ResourceId()
+
+ // If we have a multi-reference (splat), then we depend on ALL
+ // resources with this type/name.
+ if v.Multi && v.Index == -1 {
+ return []string{fmt.Sprintf("%s.*", id)}
+ }
+
+ // Otherwise, we depend on a specific index.
+ idx := v.Index
+ if !v.Multi || v.Index == -1 {
+ idx = 0
+ }
+
+ // Depend on the index, as well as "N" which represents the
+ // un-expanded set of resources.
+ return []string{fmt.Sprintf("%s.%d/%s.N", id, idx, id)}
+ case *config.UserVariable:
+ return []string{fmt.Sprintf("var.%s", v.Name)}
+ default:
+ return nil
+ }
+}
+
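+// modulePrefixStr returns the dotted module prefix for a normalized module
+// path, skipping the leading "root" element. For example (illustrative),
+// []string{"root", "foo", "bar"} yields "module.foo.module.bar".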
+func modulePrefixStr(p []string) string {
+ parts := make([]string, 0, len(p)*2)
+ for _, p := range p[1:] {
+ parts = append(parts, "module", p)
+ }
+
+ return strings.Join(parts, ".")
+}
+
+func modulePrefixList(result []string, prefix string) []string {
+ if prefix != "" {
+ for i, v := range result {
+ result[i] = fmt.Sprintf("%s.%s", prefix, v)
+ }
+ }
+
+ return result
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
new file mode 100644
index 00000000..cda35cb7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
@@ -0,0 +1,51 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// ResourceCountTransformer is a GraphTransformer that expands the count
+// out for a specific resource.
+//
+// This assumes that the count is already interpolated.
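+//
+// For example (illustrative): a count of 3 adds nodes for indexes 0, 1 and
+// 2, while a count of exactly 1 adds a single node with index -1 so the
+// "resource" vs. "resource.0" addressing boundary is handled properly.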
+type ResourceCountTransformer struct {
+ Concrete ConcreteResourceNodeFunc
+
+ Count int
+ Addr *ResourceAddress
+}
+
+func (t *ResourceCountTransformer) Transform(g *Graph) error {
+ // Don't allow the count to be negative
+ if t.Count < 0 {
+ return fmt.Errorf("negative count: %d", t.Count)
+ }
+
+ // For each count, build and add the node
+ for i := 0; i < t.Count; i++ {
+ // Set the index. If our count is 1 we special case it so that
+ // we handle the "resource.0" and "resource" boundary properly.
+ index := i
+ if t.Count == 1 {
+ index = -1
+ }
+
+ // Build the resource address
+ addr := t.Addr.Copy()
+ addr.Index = index
+
+ // Build the abstract node and the concrete one
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ // Add it to the graph
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
new file mode 100644
index 00000000..aee053d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
@@ -0,0 +1,38 @@
+package terraform
+
+import "github.com/hashicorp/terraform/dag"
+
+const rootNodeName = "root"
+
+// RootTransformer is a GraphTransformer that adds a root to the graph.
+type RootTransformer struct{}
+
+func (t *RootTransformer) Transform(g *Graph) error {
+ // If we already have a good root, we're done
+ if _, err := g.Root(); err == nil {
+ return nil
+ }
+
+ // Add a root
+ var root graphNodeRoot
+ g.Add(root)
+
+ // Connect the root to all the edges that need it
+ for _, v := range g.Vertices() {
+ if v == root {
+ continue
+ }
+
+ if g.UpEdges(v).Len() == 0 {
+ g.Connect(dag.BasicEdge(root, v))
+ }
+ }
+
+ return nil
+}
+
+type graphNodeRoot struct{}
+
+func (n graphNodeRoot) Name() string {
+ return rootNodeName
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
new file mode 100644
index 00000000..471cd746
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
@@ -0,0 +1,65 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// StateTransformer is a GraphTransformer that adds the elements of
+// the state to the graph.
+//
+// This transform is used for example by the DestroyPlanGraphBuilder to ensure
+// that only resources that are in the state are represented in the graph.
+type StateTransformer struct {
+ Concrete ConcreteResourceNodeFunc
+
+ State *State
+}
+
+func (t *StateTransformer) Transform(g *Graph) error {
+ // If the state is nil or empty (nil is empty) then do nothing
+ if t.State.Empty() {
+ return nil
+ }
+
+ // Go through all the modules in the diff.
+ log.Printf("[TRACE] StateTransformer: starting")
+ var nodes []dag.Vertex
+ for _, ms := range t.State.Modules {
+ log.Printf("[TRACE] StateTransformer: Module: %v", ms.Path)
+
+ // Go through all the resources in this module.
+ for name, rs := range ms.Resources {
+ log.Printf("[TRACE] StateTransformer: Resource %q: %#v", name, rs)
+
+ // Add the resource to the graph
+ addr, err := parseResourceAddressInternal(name)
+ if err != nil {
+ panic(fmt.Sprintf(
+ "Error parsing internal name, this is a bug: %q", name))
+ }
+
+ // Very important: add the module path for this resource to
+ // the address. Remove "root" from it.
+ addr.Path = ms.Path[1:]
+
+ // Add the resource to the graph
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ nodes = append(nodes, node)
+ }
+ }
+
+ // Add all the nodes to the graph
+ for _, n := range nodes {
+ g.Add(n)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
new file mode 100644
index 00000000..225ac4b4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
@@ -0,0 +1,144 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeTargetable is an interface for graph nodes to implement when they
+// need to be told about incoming targets. This is useful for nodes that need
+// to respect targets as they dynamically expand. Note that the list passed
+// in contains every target provided; each implementing graph node must
+// filter it down to the targets it considers relevant.
+type GraphNodeTargetable interface {
+ SetTargets([]ResourceAddress)
+}
+
+// TargetsTransformer is a GraphTransformer that, when the user specifies a
+// list of resources to target, limits the graph to only those resources and
+// their dependencies.
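+//
+// A minimal, illustrative sketch of using it (assumes a *Graph g in scope):
+//
+//	t := &TargetsTransformer{Targets: []string{"aws_instance.web"}}
+//	if err := t.Transform(g); err != nil {
+//		// handle the targeting error
+//	}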
+type TargetsTransformer struct {
+ // List of targeted resource names specified by the user
+ Targets []string
+
+ // List of parsed targets, provided by callers like ResourceCountTransform
+ // that already have the targets parsed
+ ParsedTargets []ResourceAddress
+
+ // Set to true when we're in a `terraform destroy` or a
+ // `terraform plan -destroy`
+ Destroy bool
+}
+
+func (t *TargetsTransformer) Transform(g *Graph) error {
+ if len(t.Targets) > 0 && len(t.ParsedTargets) == 0 {
+ addrs, err := t.parseTargetAddresses()
+ if err != nil {
+ return err
+ }
+
+ t.ParsedTargets = addrs
+ }
+
+ if len(t.ParsedTargets) > 0 {
+ targetedNodes, err := t.selectTargetedNodes(g, t.ParsedTargets)
+ if err != nil {
+ return err
+ }
+
+ for _, v := range g.Vertices() {
+ removable := false
+ if _, ok := v.(GraphNodeResource); ok {
+ removable = true
+ }
+ if vr, ok := v.(RemovableIfNotTargeted); ok {
+ removable = vr.RemoveIfNotTargeted()
+ }
+ if removable && !targetedNodes.Include(v) {
+ log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v))
+ g.Remove(v)
+ }
+ }
+ }
+
+ return nil
+}
+
+func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) {
+ addrs := make([]ResourceAddress, len(t.Targets))
+ for i, target := range t.Targets {
+ ta, err := ParseResourceAddress(target)
+ if err != nil {
+ return nil, err
+ }
+ addrs[i] = *ta
+ }
+
+ return addrs, nil
+}
+
+// selectTargetedNodes returns the set of targeted nodes. A targeted node is
+// either addressed directly or is an ancestor of a targeted node. Destroy
+// mode keeps descendents instead of ancestors.
+func (t *TargetsTransformer) selectTargetedNodes(
+ g *Graph, addrs []ResourceAddress) (*dag.Set, error) {
+ targetedNodes := new(dag.Set)
+ for _, v := range g.Vertices() {
+ if t.nodeIsTarget(v, addrs) {
+ targetedNodes.Add(v)
+
+ // We inform nodes that ask about the list of targets - helps for nodes
+ // that need to dynamically expand. Note that this only occurs for nodes
+ // that are already directly targeted.
+ if tn, ok := v.(GraphNodeTargetable); ok {
+ tn.SetTargets(addrs)
+ }
+
+ var deps *dag.Set
+ var err error
+ if t.Destroy {
+ deps, err = g.Descendents(v)
+ } else {
+ deps, err = g.Ancestors(v)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ for _, d := range deps.List() {
+ targetedNodes.Add(d)
+ }
+ }
+ }
+
+ return targetedNodes, nil
+}
+
+func (t *TargetsTransformer) nodeIsTarget(
+ v dag.Vertex, addrs []ResourceAddress) bool {
+ r, ok := v.(GraphNodeResource)
+ if !ok {
+ return false
+ }
+
+ addr := r.ResourceAddr()
+ for _, targetAddr := range addrs {
+ if targetAddr.Equals(addr) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// RemovableIfNotTargeted is a special interface for graph nodes that
+// aren't directly addressable, but need to be removed from the graph when they
+// are not targeted. (Nodes that are not directly targeted end up in the set of
+// targeted nodes because something that _is_ targeted depends on them.) The
+// initial use case for this interface is GraphNodeConfigVariable, which was
+// having trouble interpolating for module variables in targeted scenarios that
+// filtered out the resource node being referenced.
+type RemovableIfNotTargeted interface {
+ RemoveIfNotTargeted() bool
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
new file mode 100644
index 00000000..21842789
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
@@ -0,0 +1,20 @@
+package terraform
+
+// TransitiveReductionTransformer is a GraphTransformer that finds the
+// transitive reduction of the graph. For a definition of transitive
+// reduction, see Wikipedia.
+type TransitiveReductionTransformer struct{}
+
+func (t *TransitiveReductionTransformer) Transform(g *Graph) error {
+ // If the graph isn't valid, skip the transitive reduction.
+ // We don't error here because Terraform itself handles graph
+ // validation in a better way, or we assume it does.
+ if err := g.Validate(); err != nil {
+ return nil
+ }
+
+ // Do it
+ g.TransitiveReduction()
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
new file mode 100644
index 00000000..b31e2c76
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
@@ -0,0 +1,40 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// RootVariableTransformer is a GraphTransformer that adds all the root
+// variables to the graph.
+//
+// Root variables are currently no-ops but they must be added to the
+// graph since downstream things that depend on them must be able to
+// reach them.
+type RootVariableTransformer struct {
+ Module *module.Tree
+}
+
+func (t *RootVariableTransformer) Transform(g *Graph) error {
+ // If no config, no variables
+ if t.Module == nil {
+ return nil
+ }
+
+ // If we have no vars, we're done!
+ vars := t.Module.Config().Variables
+ if len(vars) == 0 {
+ return nil
+ }
+
+ // Add all variables here
+ for _, v := range vars {
+ node := &NodeRootVariable{
+ Config: v,
+ }
+
+ // Add it!
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
new file mode 100644
index 00000000..6b1293fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
@@ -0,0 +1,44 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/dag"
+)
+
+// VertexTransformer is a GraphTransformer that transforms vertices
+// using the GraphVertexTransformers. The Transforms are run in sequential
+// order. If a transform replaces a vertex then the next transform will see
+// the new vertex.
+type VertexTransformer struct {
+ Transforms []GraphVertexTransformer
+}
+
+func (t *VertexTransformer) Transform(g *Graph) error {
+ for _, v := range g.Vertices() {
+ for _, vt := range t.Transforms {
+ newV, err := vt.Transform(v)
+ if err != nil {
+ return err
+ }
+
+ // If the vertex didn't change, then don't do anything more
+ if newV == v {
+ continue
+ }
+
+ // Vertex changed, replace it within the graph
+ if ok := g.Replace(v, newV); !ok {
+ // This should never happen, big problem
+ return fmt.Errorf(
+ "Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v",
+ dag.VertexName(v), dag.VertexName(newV), v, newV)
+ }
+
+ // Replace v so that future transforms use the proper vertex
+ v = newV
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
new file mode 100644
index 00000000..7c874592
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
@@ -0,0 +1,26 @@
+package terraform
+
+// UIInput is the interface that must be implemented to ask for input
+// from the user. This should forward the request to wherever the user
+// inputs things to ask for values.
+type UIInput interface {
+ Input(*InputOpts) (string, error)
+}
+
+// InputOpts are options for asking for input.
+type InputOpts struct {
+ // Id is a unique ID for the question being asked that might be
+ // used for logging or to look up a prior answered question.
+ Id string
+
+ // Query is a human-friendly question for inputting this value.
+ Query string
+
+ // Description is a description about what this option is. Be wary
+ // that this will probably be in a terminal so split lines as you see
+ // necessary.
+ Description string
+
+ // Default will be the value returned if no data is entered.
+ Default string
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
new file mode 100644
index 00000000..e3a07efa
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
@@ -0,0 +1,23 @@
+package terraform
+
+// MockUIInput is an implementation of UIInput that can be used for tests.
+type MockUIInput struct {
+ InputCalled bool
+ InputOpts *InputOpts
+ InputReturnMap map[string]string
+ InputReturnString string
+ InputReturnError error
+ InputFn func(*InputOpts) (string, error)
+}
+
+func (i *MockUIInput) Input(opts *InputOpts) (string, error) {
+ i.InputCalled = true
+ i.InputOpts = opts
+ if i.InputFn != nil {
+ return i.InputFn(opts)
+ }
+ if i.InputReturnMap != nil {
+ return i.InputReturnMap[opts.Id], i.InputReturnError
+ }
+ return i.InputReturnString, i.InputReturnError
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
new file mode 100644
index 00000000..2207d1d0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
@@ -0,0 +1,19 @@
+package terraform
+
+import (
+ "fmt"
+)
+
+// PrefixUIInput is an implementation of UIInput that prefixes the ID
+// with a string, allowing queries to be namespaced.
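+//
+// For example (illustrative): with IdPrefix "provider.aws", an incoming
+// InputOpts Id of "access_key" is forwarded as "provider.aws.access_key".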
+type PrefixUIInput struct {
+ IdPrefix string
+ QueryPrefix string
+ UIInput UIInput
+}
+
+func (i *PrefixUIInput) Input(opts *InputOpts) (string, error) {
+ opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id)
+ opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query)
+ return i.UIInput.Input(opts)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go
new file mode 100644
index 00000000..84427c63
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go
@@ -0,0 +1,7 @@
+package terraform
+
+// UIOutput is the interface that must be implemented to output
+// data to the end user.
+type UIOutput interface {
+ Output(string)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go
new file mode 100644
index 00000000..135a91c5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go
@@ -0,0 +1,9 @@
+package terraform
+
+type CallbackUIOutput struct {
+ OutputFn func(string)
+}
+
+func (o *CallbackUIOutput) Output(v string) {
+ o.OutputFn(v)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
new file mode 100644
index 00000000..7852bc42
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
@@ -0,0 +1,16 @@
+package terraform
+
+// MockUIOutput is an implementation of UIOutput that can be used for tests.
+type MockUIOutput struct {
+ OutputCalled bool
+ OutputMessage string
+ OutputFn func(string)
+}
+
+func (o *MockUIOutput) Output(v string) {
+ o.OutputCalled = true
+ o.OutputMessage = v
+ if o.OutputFn != nil {
+ o.OutputFn(v)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
new file mode 100644
index 00000000..878a0312
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
@@ -0,0 +1,15 @@
+package terraform
+
+// ProvisionerUIOutput is an implementation of UIOutput that calls a hook
+// for the output so that the hooks can handle it.
+type ProvisionerUIOutput struct {
+ Info *InstanceInfo
+ Type string
+ Hooks []Hook
+}
+
+func (o *ProvisionerUIOutput) Output(msg string) {
+ for _, h := range o.Hooks {
+ h.ProvisionOutput(o.Info, o.Type, msg)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/util.go b/vendor/github.com/hashicorp/terraform/terraform/util.go
new file mode 100644
index 00000000..f41f0d7d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/util.go
@@ -0,0 +1,93 @@
+package terraform
+
+import (
+ "sort"
+ "strings"
+)
+
+// Semaphore is a wrapper around a channel to provide
+// utility methods to clarify that we are treating the
+// channel as a semaphore.
+type Semaphore chan struct{}
+
+// NewSemaphore creates a semaphore that allows up
+// to a given limit of simultaneous acquisitions
+func NewSemaphore(n int) Semaphore {
+ if n == 0 {
+ panic("semaphore with limit 0")
+ }
+ ch := make(chan struct{}, n)
+ return Semaphore(ch)
+}
+
+// Acquire is used to acquire an available slot.
+// Blocks until available.
+func (s Semaphore) Acquire() {
+ s <- struct{}{}
+}
+
+// TryAcquire is used to do a non-blocking acquire.
+// Returns a bool indicating success
+func (s Semaphore) TryAcquire() bool {
+ select {
+ case s <- struct{}{}:
+ return true
+ default:
+ return false
+ }
+}
+
+// Release is used to return a slot. Acquire must
+// be called as a pre-condition.
+func (s Semaphore) Release() {
+ select {
+ case <-s:
+ default:
+ panic("release without an acquire")
+ }
+}
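+
+// A minimal usage sketch (items and doWork are hypothetical):
+//
+//	sem := NewSemaphore(4)
+//	for _, item := range items {
+//		sem.Acquire()
+//		go func(it string) {
+//			defer sem.Release()
+//			doWork(it)
+//		}(item)
+//	}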
+
+// resourceProvider returns the provider name for the given type.
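+//
+// For example (illustrative): resourceProvider("aws_instance", "") returns
+// "aws", while resourceProvider("aws_instance", "aws.west") returns
+// "aws.west" because an explicit alias always wins.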
+func resourceProvider(t, alias string) string {
+ if alias != "" {
+ return alias
+ }
+
+ idx := strings.IndexRune(t, '_')
+ if idx == -1 {
+ // If no underscores, the resource name is assumed to be
+ // also the provider name, e.g. if the provider exposes
+ // only a single resource of each type.
+ return t
+ }
+
+ return t[:idx]
+}
+
+// strSliceContains checks if a given string is contained in a slice
+// When anybody asks why Go needs generics, here you go.
+func strSliceContains(haystack []string, needle string) bool {
+ for _, s := range haystack {
+ if s == needle {
+ return true
+ }
+ }
+ return false
+}
+
+// deduplicate a slice of strings
+func uniqueStrings(s []string) []string {
+ if len(s) < 2 {
+ return s
+ }
+
+ sort.Strings(s)
+ result := make([]string, 1, len(s))
+ result[0] = s[0]
+ for i := 1; i < len(s); i++ {
+ if s[i] != result[len(result)-1] {
+ result = append(result, s[i])
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go
new file mode 100644
index 00000000..300f2adb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/variables.go
@@ -0,0 +1,166 @@
+package terraform
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/helper/hilmapstructure"
+)
+
+// Variables returns the fully loaded set of variables to use with
+// ContextOpts and NewContext, loading any additional variables from
+// the environment or any other sources.
+//
+// The given module tree doesn't need to be loaded.
+func Variables(
+ m *module.Tree,
+ override map[string]interface{}) (map[string]interface{}, error) {
+ result := make(map[string]interface{})
+
+ // Variables are loaded in the following sequence. Each additional step
+ // will override conflicting variable keys from prior steps:
+ //
+ // * Take default values from config
+ // * Take values from TF_VAR_x env vars
+ // * Take values specified in the "override" param which is usually
+ // from -var, -var-file, etc.
+ //
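+ // For example (illustrative): a config default of "foo" for variable
+ // "name" is overridden by TF_VAR_name=bar in the environment, which is
+ // in turn overridden by -var 'name=baz' arriving via the override map.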
+
+ // First load from the config
+ for _, v := range m.Config().Variables {
+ // If the var has no default, ignore
+ if v.Default == nil {
+ continue
+ }
+
+ // If the type isn't a string, we use it as-is since it is a rich type
+ if v.Type() != config.VariableTypeString {
+ result[v.Name] = v.Default
+ continue
+ }
+
+ // v.Default has already been parsed as HCL but it may be an int type
+ switch typedDefault := v.Default.(type) {
+ case string:
+ if typedDefault == "" {
+ continue
+ }
+ result[v.Name] = typedDefault
+ case int, int64:
+ result[v.Name] = fmt.Sprintf("%d", typedDefault)
+ case float32, float64:
+ result[v.Name] = fmt.Sprintf("%f", typedDefault)
+ case bool:
+ result[v.Name] = fmt.Sprintf("%t", typedDefault)
+ default:
+ panic(fmt.Sprintf(
+ "Unknown default var type: %T\n\n"+
+ "THIS IS A BUG. Please report it.",
+ v.Default))
+ }
+ }
+
+ // Load from env vars
+ for _, v := range os.Environ() {
+ if !strings.HasPrefix(v, VarEnvPrefix) {
+ continue
+ }
+
+ // Strip off the prefix and get the value after the first "="
+ idx := strings.Index(v, "=")
+ k := v[len(VarEnvPrefix):idx]
+ v = v[idx+1:]
+
+ // Override the configuration-default values. Note that *not* finding the variable
+ // in configuration is OK, as we don't want to preclude people from having multiple
+ // sets of TF_VAR_whatever in their environment even if it is a little weird.
+ for _, schema := range m.Config().Variables {
+ if schema.Name != k {
+ continue
+ }
+
+ varType := schema.Type()
+ varVal, err := parseVariableAsHCL(k, v, varType)
+ if err != nil {
+ return nil, err
+ }
+
+ switch varType {
+ case config.VariableTypeMap:
+ if err := varSetMap(result, k, varVal); err != nil {
+ return nil, err
+ }
+ default:
+ result[k] = varVal
+ }
+ }
+ }
+
+ // Load from overrides
+ for k, v := range override {
+ for _, schema := range m.Config().Variables {
+ if schema.Name != k {
+ continue
+ }
+
+ switch schema.Type() {
+ case config.VariableTypeList:
+ result[k] = v
+ case config.VariableTypeMap:
+ if err := varSetMap(result, k, v); err != nil {
+ return nil, err
+ }
+ case config.VariableTypeString:
+ // Convert to a string and set. We don't catch any errors
+ // here because the validation step later should catch
+ // any type errors.
+ var strVal string
+ if err := hilmapstructure.WeakDecode(v, &strVal); err == nil {
+ result[k] = strVal
+ } else {
+ result[k] = v
+ }
+ default:
+ panic(fmt.Sprintf(
+ "Unhandled var type: %T\n\n"+
+ "THIS IS A BUG. Please report it.",
+ schema.Type()))
+ }
+ }
+ }
+
+ return result, nil
+}
+
+// varSetMap sets or merges the map in "v" with the key "k" in the
+// "current" set of variables. This is just a private function to remove
+// duplicate logic in Variables
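+//
+// For example (illustrative): if current["tags"] already holds
+// map[string]interface{}{"a": "1"} and v is map[string]interface{}{"b": "2"},
+// the existing map is merged in place to contain both "a" and "b".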
+func varSetMap(current map[string]interface{}, k string, v interface{}) error {
+ existing, ok := current[k]
+ if !ok {
+ current[k] = v
+ return nil
+ }
+
+ existingMap, ok := existing.(map[string]interface{})
+ if !ok {
+ panic(fmt.Sprintf("%q is not a map, this is a bug in Terraform.", k))
+ }
+
+ switch typedV := v.(type) {
+ case []map[string]interface{}:
+ for newKey, newVal := range typedV[0] {
+ existingMap[newKey] = newVal
+ }
+ case map[string]interface{}:
+ for newKey, newVal := range typedV {
+ existingMap[newKey] = newVal
+ }
+ default:
+ return fmt.Errorf("variable %q should be type map, got %s", k, hclTypeName(v))
+ }
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go
new file mode 100644
index 00000000..e184dc5a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/version.go
@@ -0,0 +1,31 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/go-version"
+)
+
+// The main version number that is being run at the moment.
+const Version = "0.9.4"
+
+// A pre-release marker for the version. If this is "" (empty string)
+// then it means that it is a final release. Otherwise, this is a pre-release
+// such as "dev" (in development), "beta", "rc1", etc.
+const VersionPrerelease = ""
+
+// SemVersion is an instance of version.Version. This has the secondary
+// benefit of verifying during tests and init time that our version is a
+// proper semantic version, which should always be the case.
+var SemVersion = version.Must(version.NewVersion(Version))
+
+// VersionHeader is the header name used to send the current terraform version
+// in http requests.
+const VersionHeader = "Terraform-Version"
+
+func VersionString() string {
+ if VersionPrerelease != "" {
+ return fmt.Sprintf("%s-%s", Version, VersionPrerelease)
+ }
+ return Version
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
new file mode 100644
index 00000000..3cbbf560
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
@@ -0,0 +1,69 @@
+package terraform
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/go-version"
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// checkRequiredVersion verifies that any version requirements specified by
+// the configuration are met.
+//
+// This checks the root module as well as any additional version requirements
+// from child modules.
+//
+// This is tested in context_test.go.
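+//
+// For example (illustrative): a module containing
+//
+//	terraform {
+//	  required_version = ">= 0.9.4"
+//	}
+//
+// produces an error here when the running Terraform is older than 0.9.4.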
+func checkRequiredVersion(m *module.Tree) error {
+ // Check any children
+ for _, c := range m.Children() {
+ if err := checkRequiredVersion(c); err != nil {
+ return err
+ }
+ }
+
+ var tf *config.Terraform
+ if c := m.Config(); c != nil {
+ tf = c.Terraform
+ }
+
+ // If there is no Terraform config or the required version isn't set,
+ // we move on.
+ if tf == nil || tf.RequiredVersion == "" {
+ return nil
+ }
+
+ // Path for errors
+ module := "root"
+ if path := normalizeModulePath(m.Path()); len(path) > 1 {
+ module = modulePrefixStr(path)
+ }
+
+ // Check this version requirement of this module
+ cs, err := version.NewConstraint(tf.RequiredVersion)
+ if err != nil {
+ return fmt.Errorf(
+ "%s: terraform.required_version %q syntax error: %s",
+ module,
+ tf.RequiredVersion, err)
+ }
+
+ if !cs.Check(SemVersion) {
+ return fmt.Errorf(
+ "The currently running version of Terraform doesn't meet the\n"+
+ "version requirements explicitly specified by the configuration.\n"+
+ "Please use the required version or update the configuration.\n"+
+ "Note that version requirements are usually set for a reason, so\n"+
+ "we recommend verifying with whoever set the version requirements\n"+
+ "prior to making any manual changes.\n\n"+
+ " Module: %s\n"+
+ " Required version: %s\n"+
+ " Current version: %s",
+ module,
+ tf.RequiredVersion,
+ SemVersion)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
new file mode 100644
index 00000000..cbd78dd9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT.
+
+package terraform
+
+import "fmt"
+
+const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport"
+
+var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96}
+
+func (i walkOperation) String() string {
+ if i >= walkOperation(len(_walkOperation_index)-1) {
+ return fmt.Sprintf("walkOperation(%d)", i)
+ }
+ return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]]
+}
diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE
new file mode 100644
index 00000000..f0e5c79e
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0. \ No newline at end of file
diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md
new file mode 100644
index 00000000..d4db7fc9
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/README.md
@@ -0,0 +1,86 @@
+# Yamux
+
+Yamux (Yet another Multiplexer) is a multiplexing library for Golang.
+It relies on an underlying connection to provide reliability
+and ordering, such as TCP or Unix domain sockets, and provides
+stream-oriented multiplexing. It is inspired by SPDY but is not
+interoperable with it.
+
+Yamux features include:
+
+* Bi-directional streams
+ * Streams can be opened by either client or server
+ * Useful for NAT traversal
+ * Server-side push support
+* Flow control
+ * Avoid starvation
+ * Back-pressure to prevent overwhelming a receiver
+* Keep Alives
+ * Enables persistent connections over a load balancer
+* Efficient
+ * Enables thousands of logical streams with low overhead
+
+## Documentation
+
+For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux).
+
+## Specification
+
+The full specification for Yamux is provided in the `spec.md` file.
+It can be used as a guide for implementors of interoperable libraries.
+
+## Usage
+
+Using Yamux is remarkably simple:
+
+```go
+
+func client() {
+ // Get a TCP connection
+ conn, err := net.Dial(...)
+ if err != nil {
+ panic(err)
+ }
+
+ // Setup client side of yamux
+ session, err := yamux.Client(conn, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ // Open a new stream
+ stream, err := session.Open()
+ if err != nil {
+ panic(err)
+ }
+
+ // Stream implements net.Conn
+ stream.Write([]byte("ping"))
+}
+
+func server() {
+ // Accept a TCP connection
+ conn, err := listener.Accept()
+ if err != nil {
+ panic(err)
+ }
+
+ // Setup server side of yamux
+ session, err := yamux.Server(conn, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ // Accept a stream
+ stream, err := session.Accept()
+ if err != nil {
+ panic(err)
+ }
+
+ // Listen for a message
+ buf := make([]byte, 4)
+ stream.Read(buf)
+}
+
+```
+
diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go
new file mode 100644
index 00000000..be6ebca9
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/addr.go
@@ -0,0 +1,60 @@
+package yamux
+
+import (
+ "fmt"
+ "net"
+)
+
+// hasAddr is used to get the address from the underlying connection
+type hasAddr interface {
+ LocalAddr() net.Addr
+ RemoteAddr() net.Addr
+}
+
+// yamuxAddr is used when we cannot get the underlying address
+type yamuxAddr struct {
+ Addr string
+}
+
+func (*yamuxAddr) Network() string {
+ return "yamux"
+}
+
+func (y *yamuxAddr) String() string {
+ return fmt.Sprintf("yamux:%s", y.Addr)
+}
+
+// Addr is used to get the address of the listener.
+func (s *Session) Addr() net.Addr {
+ return s.LocalAddr()
+}
+
+// LocalAddr is used to get the local address of the
+// underlying connection.
+func (s *Session) LocalAddr() net.Addr {
+ addr, ok := s.conn.(hasAddr)
+ if !ok {
+ return &yamuxAddr{"local"}
+ }
+ return addr.LocalAddr()
+}
+
+// RemoteAddr is used to get the address of the remote end
+// of the underlying connection.
+func (s *Session) RemoteAddr() net.Addr {
+ addr, ok := s.conn.(hasAddr)
+ if !ok {
+ return &yamuxAddr{"remote"}
+ }
+ return addr.RemoteAddr()
+}
+
+// LocalAddr returns the local address
+func (s *Stream) LocalAddr() net.Addr {
+ return s.session.LocalAddr()
+}
+
+// RemoteAddr returns the remote address
+func (s *Stream) RemoteAddr() net.Addr {
+ return s.session.RemoteAddr()
+}
diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go
new file mode 100644
index 00000000..4f529382
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/const.go
@@ -0,0 +1,157 @@
+package yamux
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+var (
+ // ErrInvalidVersion means we received a frame with an
+ // invalid version
+ ErrInvalidVersion = fmt.Errorf("invalid protocol version")
+
+ // ErrInvalidMsgType means we received a frame with an
+ // invalid message type
+ ErrInvalidMsgType = fmt.Errorf("invalid msg type")
+
+ // ErrSessionShutdown is used if there is a shutdown during
+ // an operation
+ ErrSessionShutdown = fmt.Errorf("session shutdown")
+
+ // ErrStreamsExhausted is returned if we have no more
+ // stream ids to issue
+ ErrStreamsExhausted = fmt.Errorf("streams exhausted")
+
+ // ErrDuplicateStream is used if a duplicate stream is
+ // opened inbound
+ ErrDuplicateStream = fmt.Errorf("duplicate stream initiated")
+
+	// ErrRecvWindowExceeded indicates the receive window was exceeded
+ ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
+
+ // ErrTimeout is used when we reach an IO deadline
+ ErrTimeout = fmt.Errorf("i/o deadline reached")
+
+ // ErrStreamClosed is returned when using a closed stream
+ ErrStreamClosed = fmt.Errorf("stream closed")
+
+ // ErrUnexpectedFlag is set when we get an unexpected flag
+ ErrUnexpectedFlag = fmt.Errorf("unexpected flag")
+
+ // ErrRemoteGoAway is used when we get a go away from the other side
+ ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections")
+
+ // ErrConnectionReset is sent if a stream is reset. This can happen
+ // if the backlog is exceeded, or if there was a remote GoAway.
+ ErrConnectionReset = fmt.Errorf("connection reset")
+
+ // ErrConnectionWriteTimeout indicates that we hit the "safety valve"
+ // timeout writing to the underlying stream connection.
+ ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout")
+
+	// ErrKeepAliveTimeout is sent if a missed keepalive caused the session to be closed
+ ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout")
+)
+
+const (
+ // protoVersion is the only version we support
+ protoVersion uint8 = 0
+)
+
+const (
+ // Data is used for data frames. They are followed
+ // by length bytes worth of payload.
+ typeData uint8 = iota
+
+ // WindowUpdate is used to change the window of
+ // a given stream. The length indicates the delta
+ // update to the window.
+ typeWindowUpdate
+
+ // Ping is sent as a keep-alive or to measure
+ // the RTT. The StreamID and Length value are echoed
+ // back in the response.
+ typePing
+
+ // GoAway is sent to terminate a session. The StreamID
+ // should be 0 and the length is an error code.
+ typeGoAway
+)
+
+const (
+ // SYN is sent to signal a new stream. May
+ // be sent with a data payload
+ flagSYN uint16 = 1 << iota
+
+ // ACK is sent to acknowledge a new stream. May
+ // be sent with a data payload
+ flagACK
+
+ // FIN is sent to half-close the given stream.
+ // May be sent with a data payload.
+ flagFIN
+
+ // RST is used to hard close a given stream.
+ flagRST
+)
+
+const (
+ // initialStreamWindow is the initial stream window size
+ initialStreamWindow uint32 = 256 * 1024
+)
+
+const (
+ // goAwayNormal is sent on a normal termination
+ goAwayNormal uint32 = iota
+
+ // goAwayProtoErr sent on a protocol error
+ goAwayProtoErr
+
+ // goAwayInternalErr sent on an internal error
+ goAwayInternalErr
+)
+
+const (
+ sizeOfVersion = 1
+ sizeOfType = 1
+ sizeOfFlags = 2
+ sizeOfStreamID = 4
+ sizeOfLength = 4
+ headerSize = sizeOfVersion + sizeOfType + sizeOfFlags +
+ sizeOfStreamID + sizeOfLength
+)
+
+type header []byte
+
+func (h header) Version() uint8 {
+ return h[0]
+}
+
+func (h header) MsgType() uint8 {
+ return h[1]
+}
+
+func (h header) Flags() uint16 {
+ return binary.BigEndian.Uint16(h[2:4])
+}
+
+func (h header) StreamID() uint32 {
+ return binary.BigEndian.Uint32(h[4:8])
+}
+
+func (h header) Length() uint32 {
+ return binary.BigEndian.Uint32(h[8:12])
+}
+
+func (h header) String() string {
+ return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d",
+ h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length())
+}
+
+func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) {
+ h[0] = protoVersion
+ h[1] = msgType
+ binary.BigEndian.PutUint16(h[2:4], flags)
+ binary.BigEndian.PutUint32(h[4:8], streamID)
+ binary.BigEndian.PutUint32(h[8:12], length)
+}
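For reference, the 12-byte header layout encoded above is easy to exercise outside the package. The following standalone sketch (illustration only, not part of the vendored files) mirrors the constants defined in this file using nothing but `encoding/binary`:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Layout: version (1 byte) | type (1) | flags (2, big-endian) |
	// stream ID (4, big-endian) | length (4, big-endian) = 12 bytes.
	hdr := make([]byte, 12)

	// Encode a Ping frame carrying the SYN flag and an opaque ping ID
	// of 42; as in Session.Ping, the ID travels in the length field.
	hdr[0] = 0                                // protoVersion
	hdr[1] = 2                                // typePing
	binary.BigEndian.PutUint16(hdr[2:4], 1)   // flagSYN
	binary.BigEndian.PutUint32(hdr[4:8], 0)   // session frames use stream 0
	binary.BigEndian.PutUint32(hdr[8:12], 42) // ping ID

	// Decode the fields back, exactly as the header accessors do.
	fmt.Printf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d\n",
		hdr[0], hdr[1],
		binary.BigEndian.Uint16(hdr[2:4]),
		binary.BigEndian.Uint32(hdr[4:8]),
		binary.BigEndian.Uint32(hdr[8:12]))
}
```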
diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go
new file mode 100644
index 00000000..7abc7c74
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/mux.go
@@ -0,0 +1,87 @@
+package yamux
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+// Config is used to tune the Yamux session
+type Config struct {
+	// AcceptBacklog is used to limit how many streams may be
+	// waiting for an accept.
+ AcceptBacklog int
+
+	// EnableKeepAlive is used to send periodic keep-alive
+	// messages using a ping.
+ EnableKeepAlive bool
+
+ // KeepAliveInterval is how often to perform the keep alive
+ KeepAliveInterval time.Duration
+
+	// ConnectionWriteTimeout is meant to be a "safety valve" timeout after
+	// which we will suspect a problem with the underlying connection and
+	// close it. This is only applied to writes, where there's generally
+	// an expectation that things will move along quickly.
+ ConnectionWriteTimeout time.Duration
+
+ // MaxStreamWindowSize is used to control the maximum
+ // window size that we allow for a stream.
+ MaxStreamWindowSize uint32
+
+ // LogOutput is used to control the log destination
+ LogOutput io.Writer
+}
+
+// DefaultConfig is used to return a default configuration
+func DefaultConfig() *Config {
+ return &Config{
+ AcceptBacklog: 256,
+ EnableKeepAlive: true,
+ KeepAliveInterval: 30 * time.Second,
+ ConnectionWriteTimeout: 10 * time.Second,
+ MaxStreamWindowSize: initialStreamWindow,
+ LogOutput: os.Stderr,
+ }
+}
+
+// VerifyConfig is used to verify the sanity of configuration
+func VerifyConfig(config *Config) error {
+ if config.AcceptBacklog <= 0 {
+ return fmt.Errorf("backlog must be positive")
+ }
+ if config.KeepAliveInterval == 0 {
+ return fmt.Errorf("keep-alive interval must be positive")
+ }
+ if config.MaxStreamWindowSize < initialStreamWindow {
+ return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
+ }
+ return nil
+}
+
+// Server is used to initialize a new server-side connection.
+// There must be at most one server-side connection. If a nil config is
+// provided, DefaultConfig will be used.
+func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+ if config == nil {
+ config = DefaultConfig()
+ }
+ if err := VerifyConfig(config); err != nil {
+ return nil, err
+ }
+ return newSession(config, conn, false), nil
+}
+
+// Client is used to initialize a new client-side connection.
+// There must be at most one client-side connection.
+func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+ if config == nil {
+ config = DefaultConfig()
+ }
+
+ if err := VerifyConfig(config); err != nil {
+ return nil, err
+ }
+ return newSession(config, conn, true), nil
+}
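A short usage sketch for the configuration surface above. It tunes DefaultConfig rather than building a Config from scratch, so VerifyConfig's invariants (positive backlog and keep-alive interval, window of at least 256 KiB) keep holding; net.Pipe stands in for a real connection and is an illustration choice, not something this package requires:

```go
package main

import (
	"fmt"
	"net"
	"time"

	"github.com/hashicorp/yamux"
)

func main() {
	// Both ends of net.Pipe satisfy io.ReadWriteCloser, which is all
	// that Client and Server require.
	clientConn, serverConn := net.Pipe()

	cfg := yamux.DefaultConfig()
	cfg.KeepAliveInterval = 10 * time.Second
	cfg.ConnectionWriteTimeout = 5 * time.Second

	server, err := yamux.Server(serverConn, cfg)
	if err != nil {
		panic(err)
	}
	defer server.Close()

	client, err := yamux.Client(clientConn, cfg)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	fmt.Println("open streams:", client.NumStreams()) // 0
}
```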
diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go
new file mode 100644
index 00000000..e1798183
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/session.go
@@ -0,0 +1,623 @@
+package yamux
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "math"
+ "net"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Session is used to wrap a reliable ordered connection and to
+// multiplex it into multiple streams.
+type Session struct {
+ // remoteGoAway indicates the remote side does
+	// not want further connections. Must be first for alignment.
+ remoteGoAway int32
+
+ // localGoAway indicates that we should stop
+	// accepting further connections. Must be first for alignment.
+ localGoAway int32
+
+	// nextStreamID is the next stream ID we should issue.
+	// This depends on whether we are a client or server.
+ nextStreamID uint32
+
+ // config holds our configuration
+ config *Config
+
+ // logger is used for our logs
+ logger *log.Logger
+
+ // conn is the underlying connection
+ conn io.ReadWriteCloser
+
+ // bufRead is a buffered reader
+ bufRead *bufio.Reader
+
+ // pings is used to track inflight pings
+ pings map[uint32]chan struct{}
+ pingID uint32
+ pingLock sync.Mutex
+
+ // streams maps a stream id to a stream, and inflight has an entry
+ // for any outgoing stream that has not yet been established. Both are
+ // protected by streamLock.
+ streams map[uint32]*Stream
+ inflight map[uint32]struct{}
+ streamLock sync.Mutex
+
+ // synCh acts like a semaphore. It is sized to the AcceptBacklog which
+ // is assumed to be symmetric between the client and server. This allows
+	// the client to avoid exceeding the backlog, blocking the open instead.
+ synCh chan struct{}
+
+ // acceptCh is used to pass ready streams to the client
+ acceptCh chan *Stream
+
+ // sendCh is used to mark a stream as ready to send,
+ // or to send a header out directly.
+ sendCh chan sendReady
+
+ // recvDoneCh is closed when recv() exits to avoid a race
+ // between stream registration and stream shutdown
+ recvDoneCh chan struct{}
+
+ // shutdown is used to safely close a session
+ shutdown bool
+ shutdownErr error
+ shutdownCh chan struct{}
+ shutdownLock sync.Mutex
+}
+
+// sendReady is used to either mark a stream as ready
+// or to directly send a header
+type sendReady struct {
+ Hdr []byte
+ Body io.Reader
+ Err chan error
+}
+
+// newSession is used to construct a new session
+func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
+ s := &Session{
+ config: config,
+ logger: log.New(config.LogOutput, "", log.LstdFlags),
+ conn: conn,
+ bufRead: bufio.NewReader(conn),
+ pings: make(map[uint32]chan struct{}),
+ streams: make(map[uint32]*Stream),
+ inflight: make(map[uint32]struct{}),
+ synCh: make(chan struct{}, config.AcceptBacklog),
+ acceptCh: make(chan *Stream, config.AcceptBacklog),
+ sendCh: make(chan sendReady, 64),
+ recvDoneCh: make(chan struct{}),
+ shutdownCh: make(chan struct{}),
+ }
+ if client {
+ s.nextStreamID = 1
+ } else {
+ s.nextStreamID = 2
+ }
+ go s.recv()
+ go s.send()
+ if config.EnableKeepAlive {
+ go s.keepalive()
+ }
+ return s
+}
+
+// IsClosed does a safe check to see if we have shutdown
+func (s *Session) IsClosed() bool {
+ select {
+ case <-s.shutdownCh:
+ return true
+ default:
+ return false
+ }
+}
+
+// NumStreams returns the number of currently open streams
+func (s *Session) NumStreams() int {
+ s.streamLock.Lock()
+ num := len(s.streams)
+ s.streamLock.Unlock()
+ return num
+}
+
+// Open is used to create a new stream as a net.Conn
+func (s *Session) Open() (net.Conn, error) {
+ conn, err := s.OpenStream()
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+}
+
+// OpenStream is used to create a new stream
+func (s *Session) OpenStream() (*Stream, error) {
+ if s.IsClosed() {
+ return nil, ErrSessionShutdown
+ }
+ if atomic.LoadInt32(&s.remoteGoAway) == 1 {
+ return nil, ErrRemoteGoAway
+ }
+
+ // Block if we have too many inflight SYNs
+ select {
+ case s.synCh <- struct{}{}:
+ case <-s.shutdownCh:
+ return nil, ErrSessionShutdown
+ }
+
+GET_ID:
+ // Get an ID, and check for stream exhaustion
+ id := atomic.LoadUint32(&s.nextStreamID)
+ if id >= math.MaxUint32-1 {
+ return nil, ErrStreamsExhausted
+ }
+ if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) {
+ goto GET_ID
+ }
+
+ // Register the stream
+ stream := newStream(s, id, streamInit)
+ s.streamLock.Lock()
+ s.streams[id] = stream
+ s.inflight[id] = struct{}{}
+ s.streamLock.Unlock()
+
+ // Send the window update to create
+ if err := stream.sendWindowUpdate(); err != nil {
+ select {
+ case <-s.synCh:
+ default:
+ s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore")
+ }
+ return nil, err
+ }
+ return stream, nil
+}
+
+// Accept is used to block until the next available stream
+// is ready to be accepted.
+func (s *Session) Accept() (net.Conn, error) {
+ conn, err := s.AcceptStream()
+ if err != nil {
+ return nil, err
+ }
+	return conn, nil
+}
+
+// AcceptStream is used to block until the next available stream
+// is ready to be accepted.
+func (s *Session) AcceptStream() (*Stream, error) {
+ select {
+ case stream := <-s.acceptCh:
+ if err := stream.sendWindowUpdate(); err != nil {
+ return nil, err
+ }
+ return stream, nil
+ case <-s.shutdownCh:
+ return nil, s.shutdownErr
+ }
+}
+
+// Close is used to close the session and all streams.
+// Attempts to send a GoAway before closing the connection.
+func (s *Session) Close() error {
+ s.shutdownLock.Lock()
+ defer s.shutdownLock.Unlock()
+
+ if s.shutdown {
+ return nil
+ }
+ s.shutdown = true
+ if s.shutdownErr == nil {
+ s.shutdownErr = ErrSessionShutdown
+ }
+ close(s.shutdownCh)
+ s.conn.Close()
+ <-s.recvDoneCh
+
+ s.streamLock.Lock()
+ defer s.streamLock.Unlock()
+ for _, stream := range s.streams {
+ stream.forceClose()
+ }
+ return nil
+}
+
+// exitErr is used to handle an error that is causing the
+// session to terminate.
+func (s *Session) exitErr(err error) {
+ s.shutdownLock.Lock()
+ if s.shutdownErr == nil {
+ s.shutdownErr = err
+ }
+ s.shutdownLock.Unlock()
+ s.Close()
+}
+
+// GoAway can be used to prevent accepting further
+// connections. It does not close the underlying conn.
+func (s *Session) GoAway() error {
+ return s.waitForSend(s.goAway(goAwayNormal), nil)
+}
+
+// goAway is used to send a goAway message
+func (s *Session) goAway(reason uint32) header {
+ atomic.SwapInt32(&s.localGoAway, 1)
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typeGoAway, 0, 0, reason)
+ return hdr
+}
+
+// Ping is used to measure the round-trip time to the remote end
+func (s *Session) Ping() (time.Duration, error) {
+ // Get a channel for the ping
+ ch := make(chan struct{})
+
+ // Get a new ping id, mark as pending
+ s.pingLock.Lock()
+ id := s.pingID
+ s.pingID++
+ s.pings[id] = ch
+ s.pingLock.Unlock()
+
+ // Send the ping request
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typePing, flagSYN, 0, id)
+ if err := s.waitForSend(hdr, nil); err != nil {
+ return 0, err
+ }
+
+ // Wait for a response
+ start := time.Now()
+ select {
+ case <-ch:
+ case <-time.After(s.config.ConnectionWriteTimeout):
+ s.pingLock.Lock()
+ delete(s.pings, id) // Ignore it if a response comes later.
+ s.pingLock.Unlock()
+ return 0, ErrTimeout
+ case <-s.shutdownCh:
+ return 0, ErrSessionShutdown
+ }
+
+ // Compute the RTT
+ return time.Now().Sub(start), nil
+}
+
+// keepalive is a long running goroutine that periodically does
+// a ping to keep the connection alive.
+func (s *Session) keepalive() {
+ for {
+ select {
+ case <-time.After(s.config.KeepAliveInterval):
+ _, err := s.Ping()
+ if err != nil {
+ s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
+ s.exitErr(ErrKeepAliveTimeout)
+ return
+ }
+ case <-s.shutdownCh:
+ return
+ }
+ }
+}
+
+// waitForSend waits to send a header, checking for a potential shutdown
+func (s *Session) waitForSend(hdr header, body io.Reader) error {
+ errCh := make(chan error, 1)
+ return s.waitForSendErr(hdr, body, errCh)
+}
+
+// waitForSendErr waits to send a header with optional data, checking for a
+// potential shutdown. Since there's the expectation that sends can happen
+// in a timely manner, we enforce the connection write timeout here.
+func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error {
+ timer := time.NewTimer(s.config.ConnectionWriteTimeout)
+ defer timer.Stop()
+
+ ready := sendReady{Hdr: hdr, Body: body, Err: errCh}
+ select {
+ case s.sendCh <- ready:
+ case <-s.shutdownCh:
+ return ErrSessionShutdown
+ case <-timer.C:
+ return ErrConnectionWriteTimeout
+ }
+
+ select {
+ case err := <-errCh:
+ return err
+ case <-s.shutdownCh:
+ return ErrSessionShutdown
+ case <-timer.C:
+ return ErrConnectionWriteTimeout
+ }
+}
+
+// sendNoWait does a send without waiting. Since there's the expectation that
+// the send happens right here, we enforce the connection write timeout if we
+// can't queue the header to be sent.
+func (s *Session) sendNoWait(hdr header) error {
+ timer := time.NewTimer(s.config.ConnectionWriteTimeout)
+ defer timer.Stop()
+
+ select {
+ case s.sendCh <- sendReady{Hdr: hdr}:
+ return nil
+ case <-s.shutdownCh:
+ return ErrSessionShutdown
+ case <-timer.C:
+ return ErrConnectionWriteTimeout
+ }
+}
+
+// send is a long running goroutine that sends data
+func (s *Session) send() {
+ for {
+ select {
+ case ready := <-s.sendCh:
+ // Send a header if ready
+ if ready.Hdr != nil {
+ sent := 0
+ for sent < len(ready.Hdr) {
+ n, err := s.conn.Write(ready.Hdr[sent:])
+ if err != nil {
+ s.logger.Printf("[ERR] yamux: Failed to write header: %v", err)
+ asyncSendErr(ready.Err, err)
+ s.exitErr(err)
+ return
+ }
+ sent += n
+ }
+ }
+
+ // Send data from a body if given
+ if ready.Body != nil {
+ _, err := io.Copy(s.conn, ready.Body)
+ if err != nil {
+ s.logger.Printf("[ERR] yamux: Failed to write body: %v", err)
+ asyncSendErr(ready.Err, err)
+ s.exitErr(err)
+ return
+ }
+ }
+
+ // No error, successful send
+ asyncSendErr(ready.Err, nil)
+ case <-s.shutdownCh:
+ return
+ }
+ }
+}
+
+// recv is a long running goroutine that accepts new data
+func (s *Session) recv() {
+ if err := s.recvLoop(); err != nil {
+ s.exitErr(err)
+ }
+}
+
+// recvLoop continues to receive data until a fatal error is encountered
+func (s *Session) recvLoop() error {
+ defer close(s.recvDoneCh)
+ hdr := header(make([]byte, headerSize))
+ var handler func(header) error
+ for {
+ // Read the header
+ if _, err := io.ReadFull(s.bufRead, hdr); err != nil {
+ if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") {
+ s.logger.Printf("[ERR] yamux: Failed to read header: %v", err)
+ }
+ return err
+ }
+
+ // Verify the version
+ if hdr.Version() != protoVersion {
+ s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version())
+ return ErrInvalidVersion
+ }
+
+ // Switch on the type
+ switch hdr.MsgType() {
+ case typeData:
+ handler = s.handleStreamMessage
+ case typeWindowUpdate:
+ handler = s.handleStreamMessage
+ case typeGoAway:
+ handler = s.handleGoAway
+ case typePing:
+ handler = s.handlePing
+ default:
+ return ErrInvalidMsgType
+ }
+
+ // Invoke the handler
+ if err := handler(hdr); err != nil {
+ return err
+ }
+ }
+}
+
+// handleStreamMessage handles either a data or window update frame
+func (s *Session) handleStreamMessage(hdr header) error {
+ // Check for a new stream creation
+ id := hdr.StreamID()
+ flags := hdr.Flags()
+ if flags&flagSYN == flagSYN {
+ if err := s.incomingStream(id); err != nil {
+ return err
+ }
+ }
+
+ // Get the stream
+ s.streamLock.Lock()
+ stream := s.streams[id]
+ s.streamLock.Unlock()
+
+ // If we do not have a stream, likely we sent a RST
+ if stream == nil {
+ // Drain any data on the wire
+ if hdr.MsgType() == typeData && hdr.Length() > 0 {
+ s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id)
+ if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil {
+ s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err)
+ return nil
+ }
+ } else {
+ s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr)
+ }
+ return nil
+ }
+
+ // Check if this is a window update
+ if hdr.MsgType() == typeWindowUpdate {
+ if err := stream.incrSendWindow(hdr, flags); err != nil {
+ if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+ s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+ }
+ return err
+ }
+ return nil
+ }
+
+ // Read the new data
+ if err := stream.readData(hdr, flags, s.bufRead); err != nil {
+ if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+ s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+ }
+ return err
+ }
+ return nil
+}
+
+// handlePing is invoked for a typePing frame
+func (s *Session) handlePing(hdr header) error {
+ flags := hdr.Flags()
+ pingID := hdr.Length()
+
+	// Check if this is a query; respond from a separate goroutine so we
+	// don't block the receive loop waiting on the write.
+ if flags&flagSYN == flagSYN {
+ go func() {
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typePing, flagACK, 0, pingID)
+ if err := s.sendNoWait(hdr); err != nil {
+ s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err)
+ }
+ }()
+ return nil
+ }
+
+ // Handle a response
+ s.pingLock.Lock()
+ ch := s.pings[pingID]
+ if ch != nil {
+ delete(s.pings, pingID)
+ close(ch)
+ }
+ s.pingLock.Unlock()
+ return nil
+}
+
+// handleGoAway is invoked for a typeGoAway frame
+func (s *Session) handleGoAway(hdr header) error {
+ code := hdr.Length()
+ switch code {
+ case goAwayNormal:
+ atomic.SwapInt32(&s.remoteGoAway, 1)
+ case goAwayProtoErr:
+ s.logger.Printf("[ERR] yamux: received protocol error go away")
+ return fmt.Errorf("yamux protocol error")
+ case goAwayInternalErr:
+ s.logger.Printf("[ERR] yamux: received internal error go away")
+ return fmt.Errorf("remote yamux internal error")
+ default:
+ s.logger.Printf("[ERR] yamux: received unexpected go away")
+ return fmt.Errorf("unexpected go away received")
+ }
+ return nil
+}
+
+// incomingStream is used to create a new incoming stream
+func (s *Session) incomingStream(id uint32) error {
+ // Reject immediately if we are doing a go away
+ if atomic.LoadInt32(&s.localGoAway) == 1 {
+ hdr := header(make([]byte, headerSize))
+ hdr.encode(typeWindowUpdate, flagRST, id, 0)
+ return s.sendNoWait(hdr)
+ }
+
+ // Allocate a new stream
+ stream := newStream(s, id, streamSYNReceived)
+
+ s.streamLock.Lock()
+ defer s.streamLock.Unlock()
+
+ // Check if stream already exists
+ if _, ok := s.streams[id]; ok {
+ s.logger.Printf("[ERR] yamux: duplicate stream declared")
+ if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+ s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+ }
+ return ErrDuplicateStream
+ }
+
+ // Register the stream
+ s.streams[id] = stream
+
+ // Check if we've exceeded the backlog
+ select {
+ case s.acceptCh <- stream:
+ return nil
+ default:
+ // Backlog exceeded! RST the stream
+ s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset")
+ delete(s.streams, id)
+ stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0)
+ return s.sendNoWait(stream.sendHdr)
+ }
+}
+
+// closeStream is used to close a stream once both sides have
+// issued a close. If there was an in-flight SYN and the stream
+// was not yet established, then this will give the credit back.
+func (s *Session) closeStream(id uint32) {
+ s.streamLock.Lock()
+ if _, ok := s.inflight[id]; ok {
+ select {
+ case <-s.synCh:
+ default:
+ s.logger.Printf("[ERR] yamux: SYN tracking out of sync")
+ }
+ }
+ delete(s.streams, id)
+ s.streamLock.Unlock()
+}
+
+// establishStream is used to mark a stream that was in the
+// SYN Sent state as established.
+func (s *Session) establishStream(id uint32) {
+ s.streamLock.Lock()
+ if _, ok := s.inflight[id]; ok {
+ delete(s.inflight, id)
+ } else {
+ s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)")
+ }
+ select {
+ case <-s.synCh:
+ default:
+ s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)")
+ }
+ s.streamLock.Unlock()
+}
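The ping round trip handled above can be observed end to end. A minimal sketch over an in-memory pipe (illustration only; net.Pipe is an assumption of the example, not of the library):

```go
package main

import (
	"fmt"
	"net"

	"github.com/hashicorp/yamux"
)

func main() {
	c, s := net.Pipe()
	server, err := yamux.Server(s, nil) // nil selects DefaultConfig
	if err != nil {
		panic(err)
	}
	defer server.Close()

	client, err := yamux.Client(c, nil)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Ping sends typePing with flagSYN; the server's recvLoop echoes the
	// ID back with flagACK, which closes the waiter channel here.
	rtt, err := client.Ping()
	if err != nil {
		panic(err)
	}
	fmt.Println("rtt:", rtt)
}
```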
diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go
new file mode 100644
index 00000000..d216e281
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/stream.go
@@ -0,0 +1,457 @@
+package yamux
+
+import (
+ "bytes"
+ "io"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+type streamState int
+
+const (
+ streamInit streamState = iota
+ streamSYNSent
+ streamSYNReceived
+ streamEstablished
+ streamLocalClose
+ streamRemoteClose
+ streamClosed
+ streamReset
+)
+
+// Stream is used to represent a logical stream
+// within a session.
+type Stream struct {
+ recvWindow uint32
+ sendWindow uint32
+
+ id uint32
+ session *Session
+
+ state streamState
+ stateLock sync.Mutex
+
+ recvBuf *bytes.Buffer
+ recvLock sync.Mutex
+
+ controlHdr header
+ controlErr chan error
+ controlHdrLock sync.Mutex
+
+ sendHdr header
+ sendErr chan error
+ sendLock sync.Mutex
+
+ recvNotifyCh chan struct{}
+ sendNotifyCh chan struct{}
+
+ readDeadline time.Time
+ writeDeadline time.Time
+}
+
+// newStream is used to construct a new stream within
+// a given session for an ID
+func newStream(session *Session, id uint32, state streamState) *Stream {
+ s := &Stream{
+ id: id,
+ session: session,
+ state: state,
+ controlHdr: header(make([]byte, headerSize)),
+ controlErr: make(chan error, 1),
+ sendHdr: header(make([]byte, headerSize)),
+ sendErr: make(chan error, 1),
+ recvWindow: initialStreamWindow,
+ sendWindow: initialStreamWindow,
+ recvNotifyCh: make(chan struct{}, 1),
+ sendNotifyCh: make(chan struct{}, 1),
+ }
+ return s
+}
+
+// Session returns the associated stream session
+func (s *Stream) Session() *Session {
+ return s.session
+}
+
+// StreamID returns the ID of this stream
+func (s *Stream) StreamID() uint32 {
+ return s.id
+}
+
+// Read is used to read from the stream
+func (s *Stream) Read(b []byte) (n int, err error) {
+ defer asyncNotify(s.recvNotifyCh)
+START:
+ s.stateLock.Lock()
+ switch s.state {
+ case streamLocalClose:
+ fallthrough
+ case streamRemoteClose:
+ fallthrough
+ case streamClosed:
+ s.recvLock.Lock()
+ if s.recvBuf == nil || s.recvBuf.Len() == 0 {
+ s.recvLock.Unlock()
+ s.stateLock.Unlock()
+ return 0, io.EOF
+ }
+ s.recvLock.Unlock()
+ case streamReset:
+ s.stateLock.Unlock()
+ return 0, ErrConnectionReset
+ }
+ s.stateLock.Unlock()
+
+ // If there is no data available, block
+ s.recvLock.Lock()
+ if s.recvBuf == nil || s.recvBuf.Len() == 0 {
+ s.recvLock.Unlock()
+ goto WAIT
+ }
+
+ // Read any bytes
+ n, _ = s.recvBuf.Read(b)
+ s.recvLock.Unlock()
+
+ // Send a window update potentially
+ err = s.sendWindowUpdate()
+ return n, err
+
+WAIT:
+ var timeout <-chan time.Time
+ var timer *time.Timer
+ if !s.readDeadline.IsZero() {
+ delay := s.readDeadline.Sub(time.Now())
+ timer = time.NewTimer(delay)
+ timeout = timer.C
+ }
+ select {
+ case <-s.recvNotifyCh:
+ if timer != nil {
+ timer.Stop()
+ }
+ goto START
+ case <-timeout:
+ return 0, ErrTimeout
+ }
+}
+
+// Write is used to write to the stream
+func (s *Stream) Write(b []byte) (n int, err error) {
+ s.sendLock.Lock()
+ defer s.sendLock.Unlock()
+ total := 0
+ for total < len(b) {
+ n, err := s.write(b[total:])
+ total += n
+ if err != nil {
+ return total, err
+ }
+ }
+ return total, nil
+}
+
+// write is used to write to the stream; it may return after
+// a short write.
+func (s *Stream) write(b []byte) (n int, err error) {
+ var flags uint16
+ var max uint32
+ var body io.Reader
+START:
+ s.stateLock.Lock()
+ switch s.state {
+ case streamLocalClose:
+ fallthrough
+ case streamClosed:
+ s.stateLock.Unlock()
+ return 0, ErrStreamClosed
+ case streamReset:
+ s.stateLock.Unlock()
+ return 0, ErrConnectionReset
+ }
+ s.stateLock.Unlock()
+
+ // If there is no data available, block
+ window := atomic.LoadUint32(&s.sendWindow)
+ if window == 0 {
+ goto WAIT
+ }
+
+ // Determine the flags if any
+ flags = s.sendFlags()
+
+ // Send up to our send window
+ max = min(window, uint32(len(b)))
+ body = bytes.NewReader(b[:max])
+
+ // Send the header
+ s.sendHdr.encode(typeData, flags, s.id, max)
+ if err := s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
+ return 0, err
+ }
+
+ // Reduce our send window
+ atomic.AddUint32(&s.sendWindow, ^uint32(max-1))
+
+ // Unlock
+ return int(max), err
+
+WAIT:
+ var timeout <-chan time.Time
+ if !s.writeDeadline.IsZero() {
+ delay := s.writeDeadline.Sub(time.Now())
+ timeout = time.After(delay)
+ }
+ select {
+ case <-s.sendNotifyCh:
+ goto START
+ case <-timeout:
+ return 0, ErrTimeout
+ }
+ return 0, nil
+}
+
+// sendFlags determines any flags that are appropriate
+// based on the current stream state
+func (s *Stream) sendFlags() uint16 {
+ s.stateLock.Lock()
+ defer s.stateLock.Unlock()
+ var flags uint16
+ switch s.state {
+ case streamInit:
+ flags |= flagSYN
+ s.state = streamSYNSent
+ case streamSYNReceived:
+ flags |= flagACK
+ s.state = streamEstablished
+ }
+ return flags
+}
+
+// sendWindowUpdate potentially sends a window update enabling
+// further writes to take place. Must be invoked with the lock.
+func (s *Stream) sendWindowUpdate() error {
+ s.controlHdrLock.Lock()
+ defer s.controlHdrLock.Unlock()
+
+ // Determine the delta update
+ max := s.session.config.MaxStreamWindowSize
+ delta := max - atomic.LoadUint32(&s.recvWindow)
+
+ // Determine the flags if any
+ flags := s.sendFlags()
+
+ // Check if we can omit the update
+ if delta < (max/2) && flags == 0 {
+ return nil
+ }
+
+ // Update our window
+ atomic.AddUint32(&s.recvWindow, delta)
+
+ // Send the header
+ s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)
+ if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
+ return err
+ }
+ return nil
+}
+
+// sendClose is used to send a FIN
+func (s *Stream) sendClose() error {
+ s.controlHdrLock.Lock()
+ defer s.controlHdrLock.Unlock()
+
+ flags := s.sendFlags()
+ flags |= flagFIN
+ s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0)
+ if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Close is used to close the stream
+func (s *Stream) Close() error {
+ closeStream := false
+ s.stateLock.Lock()
+ switch s.state {
+ // Opened means we need to signal a close
+ case streamSYNSent:
+ fallthrough
+ case streamSYNReceived:
+ fallthrough
+ case streamEstablished:
+ s.state = streamLocalClose
+ goto SEND_CLOSE
+
+ case streamLocalClose:
+ case streamRemoteClose:
+ s.state = streamClosed
+ closeStream = true
+ goto SEND_CLOSE
+
+ case streamClosed:
+ case streamReset:
+ default:
+ panic("unhandled state")
+ }
+ s.stateLock.Unlock()
+ return nil
+SEND_CLOSE:
+ s.stateLock.Unlock()
+ s.sendClose()
+ s.notifyWaiting()
+ if closeStream {
+ s.session.closeStream(s.id)
+ }
+ return nil
+}
+
+// forceClose is used for when the session is exiting
+func (s *Stream) forceClose() {
+ s.stateLock.Lock()
+ s.state = streamClosed
+ s.stateLock.Unlock()
+ s.notifyWaiting()
+}
+
+// processFlags is used to update the state of the stream
+// based on set flags, if any. Lock must be held
+func (s *Stream) processFlags(flags uint16) error {
+ // Close the stream without holding the state lock
+ closeStream := false
+ defer func() {
+ if closeStream {
+ s.session.closeStream(s.id)
+ }
+ }()
+
+ s.stateLock.Lock()
+ defer s.stateLock.Unlock()
+ if flags&flagACK == flagACK {
+ if s.state == streamSYNSent {
+ s.state = streamEstablished
+ }
+ s.session.establishStream(s.id)
+ }
+ if flags&flagFIN == flagFIN {
+ switch s.state {
+ case streamSYNSent:
+ fallthrough
+ case streamSYNReceived:
+ fallthrough
+ case streamEstablished:
+ s.state = streamRemoteClose
+ s.notifyWaiting()
+ case streamLocalClose:
+ s.state = streamClosed
+ closeStream = true
+ s.notifyWaiting()
+ default:
+ s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state)
+ return ErrUnexpectedFlag
+ }
+ }
+ if flags&flagRST == flagRST {
+ s.state = streamReset
+ closeStream = true
+ s.notifyWaiting()
+ }
+ return nil
+}
+
+// notifyWaiting notifies all the waiting channels
+func (s *Stream) notifyWaiting() {
+ asyncNotify(s.recvNotifyCh)
+ asyncNotify(s.sendNotifyCh)
+}
+
+// incrSendWindow updates the size of our send window
+func (s *Stream) incrSendWindow(hdr header, flags uint16) error {
+ if err := s.processFlags(flags); err != nil {
+ return err
+ }
+
+ // Increase window, unblock a sender
+ atomic.AddUint32(&s.sendWindow, hdr.Length())
+ asyncNotify(s.sendNotifyCh)
+ return nil
+}
+
+// readData is used to handle a data frame
+func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
+ if err := s.processFlags(flags); err != nil {
+ return err
+ }
+
+ // Check that our recv window is not exceeded
+ length := hdr.Length()
+ if length == 0 {
+ return nil
+ }
+ if remain := atomic.LoadUint32(&s.recvWindow); length > remain {
+ s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, remain, length)
+ return ErrRecvWindowExceeded
+ }
+
+ // Wrap in a limited reader
+ conn = &io.LimitedReader{R: conn, N: int64(length)}
+
+ // Copy into buffer
+ s.recvLock.Lock()
+ if s.recvBuf == nil {
+ // Allocate the receive buffer just-in-time to fit the full data frame.
+ // This way we can read in the whole packet without further allocations.
+ s.recvBuf = bytes.NewBuffer(make([]byte, 0, length))
+ }
+ if _, err := io.Copy(s.recvBuf, conn); err != nil {
+ s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err)
+ s.recvLock.Unlock()
+ return err
+ }
+
+ // Decrement the receive window
+ atomic.AddUint32(&s.recvWindow, ^uint32(length-1))
+ s.recvLock.Unlock()
+
+ // Unblock any readers
+ asyncNotify(s.recvNotifyCh)
+ return nil
+}
+
+// SetDeadline sets the read and write deadlines
+func (s *Stream) SetDeadline(t time.Time) error {
+ if err := s.SetReadDeadline(t); err != nil {
+ return err
+ }
+ if err := s.SetWriteDeadline(t); err != nil {
+ return err
+ }
+ return nil
+}
+
+// SetReadDeadline sets the deadline for future Read calls.
+func (s *Stream) SetReadDeadline(t time.Time) error {
+ s.readDeadline = t
+ return nil
+}
+
+// SetWriteDeadline sets the deadline for future Write calls
+func (s *Stream) SetWriteDeadline(t time.Time) error {
+ s.writeDeadline = t
+ return nil
+}
+
+// Shrink is used to free the receive buffer when it is empty.
+// This is useful when using Yamux in a connection pool to reduce
+// the idle memory utilization.
+func (s *Stream) Shrink() {
+ s.recvLock.Lock()
+ if s.recvBuf != nil && s.recvBuf.Len() == 0 {
+ s.recvBuf = nil
+ }
+ s.recvLock.Unlock()
+}
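Read deadlines interact with the WAIT path in Read above: with no buffered data, the read blocks on recvNotifyCh until the deadline timer fires and ErrTimeout is returned. A sketch of that behavior, again over an in-memory pipe for illustration:

```go
package main

import (
	"fmt"
	"net"
	"time"

	"github.com/hashicorp/yamux"
)

func main() {
	c, s := net.Pipe()
	server, err := yamux.Server(s, nil)
	if err != nil {
		panic(err)
	}
	defer server.Close()

	client, err := yamux.Client(c, nil)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Accept the stream on the server side but never write to it.
	go func() {
		if _, err := server.AcceptStream(); err != nil {
			panic(err)
		}
	}()

	stream, err := client.OpenStream()
	if err != nil {
		panic(err)
	}

	// No data will arrive, so this read returns ErrTimeout after ~100ms.
	stream.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
	buf := make([]byte, 1)
	if _, err := stream.Read(buf); err == yamux.ErrTimeout {
		fmt.Println("read timed out as expected")
	}
}
```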
diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go
new file mode 100644
index 00000000..5fe45afc
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/util.go
@@ -0,0 +1,28 @@
+package yamux
+
+// asyncSendErr is used to try an async send of an error
+func asyncSendErr(ch chan error, err error) {
+ if ch == nil {
+ return
+ }
+ select {
+ case ch <- err:
+ default:
+ }
+}
+
+// asyncNotify is used to signal a waiting goroutine
+func asyncNotify(ch chan struct{}) {
+ select {
+ case ch <- struct{}{}:
+ default:
+ }
+}
+
+// min computes the minimum of two values
+func min(a, b uint32) uint32 {
+ if a < b {
+ return a
+ }
+ return b
+}
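The non-blocking notify idiom above works because the notify channels are buffered with capacity one (see newStream in stream.go), so repeated wake-ups coalesce into a single pending notification. A standalone sketch of the pattern:

```go
package main

import "fmt"

// asyncNotify mirrors util.go: a send that never blocks, dropping
// the notification if one is already pending.
func asyncNotify(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default:
	}
}

func main() {
	ch := make(chan struct{}, 1) // capacity 1, as in newStream
	asyncNotify(ch)
	asyncNotify(ch) // coalesced with the pending notification
	<-ch
	fmt.Println("woken once despite two notifies")
}
```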
diff --git a/vendor/github.com/hooklift/iso9660/LICENSE b/vendor/github.com/hooklift/iso9660/LICENSE
new file mode 100644
index 00000000..52d13511
--- /dev/null
+++ b/vendor/github.com/hooklift/iso9660/LICENSE
@@ -0,0 +1,374 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hooklift/iso9660/README.md b/vendor/github.com/hooklift/iso9660/README.md
new file mode 100644
index 00000000..8a529a4a
--- /dev/null
+++ b/vendor/github.com/hooklift/iso9660/README.md
@@ -0,0 +1,25 @@
+# iso9660
+[![GoDoc](https://godoc.org/github.com/hooklift/iso9660?status.svg)](https://godoc.org/github.com/hooklift/iso9660)
+[![Build Status](https://travis-ci.org/hooklift/iso9660.svg?branch=master)](https://travis-ci.org/hooklift/iso9660)
+
+Go library and CLI to extract data from ISO9660 images.
+
+### CLI
+```
+$ ./iso9660
+Usage:
+ iso9660 <image-path> <destination-path>
+ iso9660 -h | --help
+ iso9660 --version
+```
+
+### Library
+An example of how to use the library to extract files and directories from an ISO image can be found in our CLI source code at:
+https://github.com/hooklift/iso9660/blob/master/cmd/iso9660/main.go
+
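+Below is a minimal sketch of the same reading loop, based on the reader API in this package (`image.iso` is a placeholder path):
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/hooklift/iso9660"
+)
+
+func main() {
+	// "image.iso" is a placeholder path, not part of the library.
+	image, err := os.Open("image.iso")
+	if err != nil {
+		panic(err)
+	}
+	defer image.Close()
+
+	r, err := iso9660.NewReader(image)
+	if err != nil {
+		panic(err)
+	}
+
+	// Next returns io.EOF once every directory record has been visited.
+	for {
+		f, err := r.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			panic(err)
+		}
+
+		fmt.Println(f.Name(), f.Size())
+		if !f.IsDir() {
+			// For regular files, Sys() returns an io.Reader over the content.
+			if _, err := io.Copy(ioutil.Discard, f.Sys().(io.Reader)); err != nil {
+				panic(err)
+			}
+		}
+	}
+}
+```
+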
+### Not supported
+* Reading files recorded in interleave mode
+* Multi-extent files, i.e. reading individual files larger than 4GB
+* Joliet extensions, meaning that file names longer than 32 characters are truncated and Unicode characters are not decoded properly.
+* Multisession extension
+* Rock Ridge extension, so recorded POSIX permissions, timestamps, and owner/group information cannot be returned for files and directories.
diff --git a/vendor/github.com/hooklift/iso9660/iso9660.go b/vendor/github.com/hooklift/iso9660/iso9660.go
new file mode 100644
index 00000000..c5a114cd
--- /dev/null
+++ b/vendor/github.com/hooklift/iso9660/iso9660.go
@@ -0,0 +1,418 @@
+// Package iso9660 implements the ECMA-119 standard, also known as ISO 9660.
+//
+// References:
+//
+// * https://en.wikipedia.org/wiki/ISO_9660
+//
+// * http://alumnus.caltech.edu/~pje/iso9660.html
+//
+// * http://users.telenet.be/it3.consultants.bvba/handouts/ISO9960.html
+//
+// * http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-119.pdf
+//
+// * http://www.drdobbs.com/database/inside-the-iso-9660-filesystem-format/184408899
+//
+// * http://www.cdfs.com
+//
+// * http://wiki.osdev.org/ISO_9660
+package iso9660
+
+import (
+ "io"
+ "os"
+ "strings"
+ "time"
+)
+
+// File is a concrete implementation of the os.FileInfo interface for
+// accessing ISO 9660 file data.
+type File struct {
+ DirectoryRecord
+ fileID string
+ // We have the raw image here only to be able to access file extents
+ image *os.File
+}
+
+// Name returns the file's name.
+func (f *File) Name() string {
+ name := strings.Split(f.fileID, ";")[0]
+ return strings.ToLower(name)
+}
+
+// Size returns the file size in bytes
+func (f *File) Size() int64 {
+ return int64(f.ExtentLengthBE)
+}
+
+// Mode returns the file's mode and permission bits. Since we don't yet support
+// Rock Ridge extensions, we cannot extract POSIX permissions and the rest of the
+// normal metadata. So, right now we return 0740 for directories and 0640 for files.
+func (f *File) Mode() os.FileMode {
+ if f.IsDir() {
+ return os.FileMode(0740)
+ }
+ return os.FileMode(0640)
+}
+
+// ModTime returns the file's modification time. Recorded timestamps are not
+// decoded yet, so the current time is returned as a placeholder.
+func (f *File) ModTime() time.Time {
+ return time.Now()
+}
+
+// IsDir tells whether the file is a directory or not.
+func (f *File) IsDir() bool {
+	return (f.FileFlags & 2) == 2
+}
+
+// Sys returns an io.Reader pointing to the file's content if it is not a directory, and nil otherwise.
+func (f *File) Sys() interface{} {
+ if f.IsDir() {
+ return nil
+ }
+
+ return io.NewSectionReader(f.image, int64(f.ExtentLocationBE*sectorSize), int64(f.ExtentLengthBE))
+}
+
+const (
+ bootRecord = 0
+ primaryVol = 1
+ supplementaryVol = 2
+ volPartition = 3
+ volSetTerminator = 255
+ // System area goes from sectors 0x00 to 0x0F. Volume descriptors can be
+ // found starting at sector 0x10
+ dataAreaSector = 0x10
+ sectorSize = 2048
+)
+
+// VolumeDescriptor identifies the volume, the partitions recorded on the volume,
+// the volume creator(s), certain attributes of the volume, the location of
+// other recorded descriptors and the version of the standard which applies
+// to the volume descriptor.
+//
+// When preparing to mount a CD, your first action will be reading the volume
+// descriptors (specifically, you will be looking for the Primary Volume Descriptor).
+// Since sectors 0x00-0x0F of the CD are reserved as System Area, the Volume
+// Descriptors can be found starting at sector 0x10.
+type VolumeDescriptor struct {
+ // 0: BootRecord
+ // 1: Primary Volume Descriptor
+ // 2: Supplementary Volume Descriptor
+ // 3: Volume Partition Descriptor
+ // 4-254: Reserved
+ // 255: Volume Descriptor Set Terminator
+ Type byte
+ // Always "CD001".
+ StandardID [5]byte
+	// Volume Descriptor Version (0x01).
+ Version byte
+}
+
+// BootRecord identifies a system which can recognize and act upon the content
+// of the field reserved for boot system use in the Boot Record, and shall
+// contain information which is used to achieve a specific state for a system or
+// for an application.
+type BootRecord struct {
+ VolumeDescriptor
+ SystemID [32]byte
+ ID [32]byte
+ SystemUse [1977]byte
+}
+
+// Terminator indicates the termination of a Volume Descriptor Set.
+type Terminator struct {
+ VolumeDescriptor
+ // All bytes of this field are set to (00).
+ Reserved [2041]byte
+}
+
+// PrimaryVolumePart1 represents the Primary Volume Descriptor first half, before the
+// root directory record. We are only reading big-endian values so placeholders
+// are used for little-endian ones.
+type PrimaryVolumePart1 struct {
+ VolumeDescriptor
+ // Unused
+ _ byte // 00
+ // The name of the system that can act upon sectors 0x00-0x0F for the volume.
+ SystemID [32]byte
+ // Identification of this volume.
+ ID [32]byte
+	// Unused.
+ _ [8]byte
+ // Amount of data available on the CD-ROM. Ignores little-endian order.
+ // Takes big-endian encoded value.
+ VolumeSpaceSizeLE int32
+ VolumeSpaceSizeBE int32
+ Unused2 [32]byte
+ // The size of the set in this logical volume (number of disks). Ignores
+ // little-endian order. Takes big-endian encoded value.
+ VolumeSetSizeLE int16
+ VolumeSetSizeBE int16
+ // The number of this disk in the Volume Set. Ignores little-endian order.
+ // Takes big-endian encoded value.
+ VolumeSeqNumberLE int16
+ VolumeSeqNumberBE int16
+ // The size in bytes of a logical block. NB: This means that a logical block
+ // on a CD could be something other than 2 KiB!
+ LogicalBlkSizeLE int16
+ LogicalBlkSizeBE int16
+ // The size in bytes of the path table. Ignores little-endian order.
+ // Takes big-endian encoded value.
+ PathTableSizeLE int32
+ PathTableSizeBE int32
+ // LBA location of the path table. The path table pointed to contains only
+ // little-endian values.
+ LocPathTableLE int32
+ // LBA location of the optional path table. The path table pointed to contains
+ // only little-endian values. Zero means that no optional path table exists.
+ LocOptPathTableLE int32
+ // LBA location of the path table. The path table pointed to contains
+ // only big-endian values.
+ LocPathTableBE int32
+ // LBA location of the optional path table. The path table pointed to contains
+ // only big-endian values. Zero means that no optional path table exists.
+ LocOptPathTableBE int32
+}
+
+// DirectoryRecord describes the characteristics of a file or directory,
+// beginning with a length octet describing the size of the entire entry.
+// Entries themselves are of variable length, up to 255 octets in size.
+// Attributes for the file described by the directory entry are stored in the
+// directory entry itself (unlike UNIX).
+// The root directory entry is a variable length object, so that the name can be of variable length.
+//
+// Important: before each entry there can be "fake entries" to support the Long File Name.
+//
+// Even if a directory spans multiple sectors, the directory entries are not
+// permitted to cross the sector boundary (unlike the path table). Where there
+// is not enough space to record an entire directory entry at the end of a sector,
+// that sector is zero-padded and the next consecutive sector is used.
+// Unfortunately, the date/time format is different from that used in the Primary
+// Volume Descriptor.
+type DirectoryRecord struct {
+ // Extended Attribute Record length, stored at the beginning of
+ // the file's extent.
+ ExtendedAttrLen byte
+ // Location of extent (Logical Block Address) in both-endian format.
+ ExtentLocationLE uint32
+ ExtentLocationBE uint32
+ // Data length (size of extent) in both-endian format.
+ ExtentLengthLE uint32
+ ExtentLengthBE uint32
+ // Date and the time of the day at which the information in the Extent
+ // described by the Directory Record was recorded.
+ RecordedTime [7]byte
+ // If this Directory Record identifies a directory then bit positions 2, 3
+ // and 7 shall be set to ZERO. If no Extended Attribute Record is associated
+ // with the File Section identified by this Directory Record then bit
+ // positions 3 and 4 shall be set to ZERO. -- 9.1.6
+ FileFlags byte
+ // File unit size for files recorded in interleaved mode, zero otherwise.
+ FileUnitSize byte
+ // Interleave gap size for files recorded in interleaved mode, zero otherwise.
+ InterleaveGapSize byte
+ // Volume sequence number - the volume that this extent is recorded on, in
+ // 16 bit both-endian format.
+ VolumeSeqNumberLE uint16
+ VolumeSeqNumberBE uint16
+ // Length of file identifier (file name). This terminates with a ';'
+ // character followed by the file ID number in ASCII coded decimal ('1').
+ FileIDLength byte
+ // The interpretation of this field depends as follows on the setting of the
+ // Directory bit of the File Flags field. If set to ZERO, it shall mean:
+ //
+ // − The field shall specify an identification for the file.
+ // − The characters in this field shall be d-characters or d1-characters, SEPARATOR 1, SEPARATOR 2.
+ // − The field shall be recorded as specified in 7.5. If set to ONE, it shall mean:
+ // − The field shall specify an identification for the directory.
+ // − The characters in this field shall be d-characters or d1-characters, or only a (00) byte, or only a (01) byte.
+ // − The field shall be recorded as specified in 7.6.
+ // fileID string
+}
+
+// PrimaryVolumePart2 represents the Primary Volume Descriptor half after the
+// root directory record.
+type PrimaryVolumePart2 struct {
+ // Identifier of the volume set of which this volume is a member.
+ VolumeSetID [128]byte
+ // The volume publisher. For extended publisher information, the first byte
+ // should be 0x5F, followed by the filename of a file in the root directory.
+ // If not specified, all bytes should be 0x20.
+ PublisherID [128]byte
+ // The identifier of the person(s) who prepared the data for this volume.
+ // For extended preparation information, the first byte should be 0x5F,
+ // followed by the filename of a file in the root directory. If not specified,
+ // all bytes should be 0x20.
+ DataPreparerID [128]byte
+ // Identifies how the data are recorded on this volume. For extended information,
+ // the first byte should be 0x5F, followed by the filename of a file in the root
+ // directory. If not specified, all bytes should be 0x20.
+ AppID [128]byte
+ // Filename of a file in the root directory that contains copyright
+ // information for this volume set. If not specified, all bytes should be 0x20.
+ CopyrightFileID [37]byte
+ // Filename of a file in the root directory that contains abstract information
+ // for this volume set. If not specified, all bytes should be 0x20.
+ AbstractFileID [37]byte
+ // Filename of a file in the root directory that contains bibliographic
+ // information for this volume set. If not specified, all bytes should be 0x20.
+ BibliographicFileID [37]byte
+ // The date and time of when the volume was created.
+ CreationTime [17]byte
+ // The date and time of when the volume was modified.
+ ModificationTime [17]byte
+ // The date and time after which this volume is considered to be obsolete.
+ // If not specified, then the volume is never considered to be obsolete.
+ ExpirationTime [17]byte
+ // The date and time after which the volume may be used. If not specified,
+ // the volume may be used immediately.
+ EffectiveTime [17]byte
+ // The directory records and path table version (always 0x01).
+ FileStructVersion byte
+ // Reserved. Always 0x00.
+ _ byte
+ // Contents not defined by ISO 9660.
+ AppUse [512]byte
+ // Reserved by ISO.
+ _ [653]byte
+}
+
+// PrimaryVolume descriptor acts much like the superblock of the UNIX filesystem, providing
+// details on the ISO-9660 compliant portions of the disk. While we can have
+// many kinds of filesystems on a single ISO-9660 CD-ROM, we can have only one
+// ISO-9660 file structure (found as the primary volume-descriptor type).
+//
+// Directory entries are successively stored within this region. Evaluation of
+// the ISO 9660 filenames is begun at this location. The root directory is stored
+// as an extent, or sequential series of sectors, that contains each of the
+// directory entries appearing in the root.
+//
+// Since ISO 9660 works by segmenting the CD-ROM into logical blocks, the size
+// of these blocks is found in the primary volume descriptor as well.
+type PrimaryVolume struct {
+ PrimaryVolumePart1
+ DirectoryRecord DirectoryRecord
+ PrimaryVolumePart2
+}
+
+// SupplementaryVolume is used by Joliet.
+type SupplementaryVolume struct {
+ VolumeDescriptor
+ Flags int `struc:"int8"`
+ SystemID string `struc:"[32]byte"`
+ ID string `struc:"[32]byte"`
+ Unused byte
+ VolumeSpaceSize int `struc:"int32"`
+ EscapeSequences string `struc:"[32]byte"`
+ VolumeSetSize int `struc:"int16"`
+ VolumeSeqNumber int `struc:"int16"`
+ LogicalBlkSize int `struc:"int16"`
+ PathTableSize int `struc:"int32"`
+ LocLPathTable int `struc:"int32"`
+ LocOptLPathTable int `struc:"int32"`
+ LocMPathTable int `struc:"int32"`
+ LocOptMPathTable int `struc:"int32"`
+ RootDirRecord DirectoryRecord
+ VolumeSetID string `struc:"[128]byte"`
+ PublisherID string `struc:"[128]byte"`
+ DataPreparerID string `struc:"[128]byte"`
+ AppID string `struc:"[128]byte"`
+ CopyrightFileID string `struc:"[37]byte"`
+ AbstractFileID string `struc:"[37]byte"`
+ BibliographicFileID string `struc:"[37]byte"`
+ CreationTime Timestamp
+ ModificationTime Timestamp
+ ExpirationTime Timestamp
+ EffectiveTime Timestamp
+ FileStructVersion int `struc:"int8"`
+ Reserved byte
+ AppData [512]byte
+ Reserved2 byte
+}
+
+// PartitionVolume ...
+type PartitionVolume struct {
+ VolumeDescriptor
+ Unused byte
+ SystemID string `struc:"[32]byte"`
+ ID string `struc:"[32]byte"`
+ Location int `struc:"int8"`
+ Size int `struc:"int8"`
+ SystemUse [1960]byte
+}
+
+// Timestamp ...
+type Timestamp struct {
+ Year int `struc:"[4]byte"`
+ Month int `struc:"[2]byte"`
+ DayOfMonth int `struc:"[2]byte"`
+ Hour int `struc:"[2]byte"`
+ Minute int `struc:"[2]byte"`
+ Second int `struc:"[2]byte"`
+ Millisecond int `struc:"[2]byte"`
+ GMTOffset int `struc:"uint8"`
+}
+
+// ExtendedAttrRecord is simply a way to extend the attributes of files.
+// Since attributes vary according to the user, almost everyone has a different
+// opinion on what a file attribute should specify.
+type ExtendedAttrRecord struct {
+ OwnerID int `struc:"int16"`
+ GroupID int `struc:"int16"`
+ Permissions int `struc:"int16"`
+ CreationTime Timestamp
+ ModificationTime Timestamp
+ ExpirationTime Timestamp
+ // Specifies the date and the time of the day at which the information in
+ // the file may be used. If the date and time are not specified then the
+ // information may be used at once.
+ EffectiveTime Timestamp
+ Format int `struc:"uint8"`
+ Attributes int `struc:"uint8"`
+ Length int `struc:"int16"`
+ SystemID string `struc:"[32]byte"`
+ SystemUse [64]byte
+ Version int `struc:"uint8"`
+ EscapeSeqLength int `struc:"uint8"`
+ Reserved [64]byte
+ AppUseLength int `struc:"int16,sizeof=AppUse"`
+ AppUse []byte
+ EscapeSequences []byte `struc:"sizefrom=AppUseLength"`
+}
+
+// PathTable contains a well-ordered sequence of records describing every
+// directory extent on the CD. There are some exceptions with this: the Path
+// Table can only contain 65536 records, due to the length of the "Parent Directory Number" field.
+// If there are more than this number of directories on the disc, some CD
+// authoring software will ignore this limit and create a non-compliant
+// CD (this applies to some earlier versions of Nero, for example). If your
+// file system uses the path table, you should be aware of this possibility.
+// Windows uses the Path Table and will fail with such non-compliant
+// CDs (additional nodes exist but appear as zero-byte). Linux, which uses
+// the directory tables, is not affected by this issue. The location of the path
+// tables can be found in the Primary Volume Descriptor. There are two table types
+// - the L-Path table (relevant to x86) and the M-Path table. The only
+// difference between these two tables is that multi-byte values in the L-Table
+// are LSB-first and the values in the M-Table are MSB-first.
+//
+// The path table is in ascending order of directory level and is alphabetically
+// sorted within each directory level.
+type PathTable struct {
+ DirIDLength int `struc:"uint8,sizeof=DirName"`
+ ExtendedAttrsLen int `struc:"uint8"`
+ // Number the Logical Block Number of the first Logical Block allocated to
+ // the Extent in which the directory is recorded.
+ // This is in a different format depending on whether this is the L-Table or
+ // M-Table (see explanation above).
+ ExtentLocation int `struc:"int32"`
+ // Number of record for parent directory (or 1 for the root directory), as a
+ // word; the first record is number 1, the second record is number 2, etc.
+ // Directory number of parent directory (an index in to the path table).
+ // This is the field that limits the table to 65536 records.
+ ParentDirNumber int `struc:"int16"`
+ // Directory Identifier (name) in d-characters.
+ DirName string
+}
diff --git a/vendor/github.com/hooklift/iso9660/reader.go b/vendor/github.com/hooklift/iso9660/reader.go
new file mode 100644
index 00000000..1c7d00a6
--- /dev/null
+++ b/vendor/github.com/hooklift/iso9660/reader.go
@@ -0,0 +1,251 @@
+package iso9660
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/c4milo/gotoolkit"
+)
+
+var (
+	// ErrInvalidImage is returned when an attempt to unpack the image's Primary Volume Descriptor fails, or
+	// when the end of the image is reached without finding a Primary Volume Descriptor.
+ ErrInvalidImage = func(err error) error { return fmt.Errorf("invalid-iso9660-image: %s", err) }
+ // ErrCorruptedImage is returned when a seek operation, on the image, failed.
+ ErrCorruptedImage = func(err error) error { return fmt.Errorf("corrupted-image: %s", err) }
+)
+
+// Reader holds the state of the ISO 9660 image reader. It must be created
+// with the NewReader constructor.
+type Reader struct {
+ // File descriptor to the opened ISO image
+ image *os.File
+ // Copy of unencoded Primary Volume Descriptor
+ pvd PrimaryVolume
+ // Queue used to walk through file system iteratively
+ queue gotoolkit.Queue
+ // Current sector
+ sector uint32
+ // Current bytes read from current sector.
+ read uint32
+}
+
+// NewReader creates a new ISO 9660 image reader.
+func NewReader(rs *os.File) (*Reader, error) {
+ // Starts reading from image data area
+ sector := dataAreaSector
+ // Iterates over volume descriptors until it finds the primary volume descriptor
+ // or an error condition.
+ for {
+ offset, err := rs.Seek(int64(sector*sectorSize), os.SEEK_SET)
+ if err != nil {
+ return nil, ErrCorruptedImage(err)
+ }
+
+ var volDesc VolumeDescriptor
+ if err := binary.Read(rs, binary.BigEndian, &volDesc); err != nil {
+ return nil, ErrCorruptedImage(err)
+ }
+
+ if volDesc.Type == primaryVol {
+ // backs up to the beginning of the sector again in order to unpack
+ // the entire primary volume descriptor more easily.
+ if _, err := rs.Seek(offset, os.SEEK_SET); err != nil {
+ return nil, ErrCorruptedImage(err)
+ }
+
+ reader := new(Reader)
+ reader.image = rs
+ reader.queue = new(gotoolkit.SliceQueue)
+
+ if err := reader.unpackPVD(); err != nil {
+ return nil, ErrCorruptedImage(err)
+ }
+
+ return reader, nil
+ }
+
+ if volDesc.Type == volSetTerminator {
+ return nil, ErrInvalidImage(errors.New("Volume Set Terminator reached. A Primary Volume Descriptor was not found."))
+ }
+ sector++
+ }
+}
+
+// Skip skips the given number of directory records.
+func (r *Reader) Skip(n int) error {
+ var drecord File
+ var len byte
+ var err error
+ for i := 0; i < n; i++ {
+ if len, err = r.unpackDRecord(&drecord); err != nil {
+ return err
+ }
+ r.read += uint32(len)
+ }
+ return nil
+}
+
+// Next moves onto the next directory record present in the image.
+// It does not use the Path Table since the goal is to read everything
+// from the ISO image.
+func (r *Reader) Next() (os.FileInfo, error) {
+ if r.queue.IsEmpty() {
+ return nil, io.EOF
+ }
+
+	// We only dequeue the directory when it does not contain more children,
+	// or when it is empty and there are no children to iterate over.
+ item, err := r.queue.Peek()
+ if err != nil {
+ panic(err)
+ }
+
+ f := item.(File)
+ if r.sector == 0 {
+ r.sector = f.ExtentLocationBE
+ _, err := r.image.Seek(int64(r.sector*sectorSize), os.SEEK_SET)
+ if err != nil {
+ return nil, ErrCorruptedImage(err)
+ }
+
+ // Skips . and .. directories
+ if err = r.Skip(2); err != nil {
+ return nil, ErrCorruptedImage(err)
+ }
+ }
+
+ var drecord File
+ var len byte
+ if (r.read % sectorSize) == 0 {
+ r.sector++
+ _, err := r.image.Seek(int64(r.sector*sectorSize), os.SEEK_SET)
+ if err != nil {
+ return nil, ErrCorruptedImage(err)
+ }
+ }
+
+ if len, err = r.unpackDRecord(&drecord); err != nil && err != io.EOF {
+ return nil, ErrCorruptedImage(err)
+ }
+ r.read += uint32(len)
+
+ if err == io.EOF {
+ // directory record is empty, sector space wasted, move onto next sector.
+ rsize := (sectorSize - (r.read % sectorSize))
+ buf := make([]byte, rsize)
+ if err := binary.Read(r.image, binary.BigEndian, buf); err != nil {
+ return nil, ErrCorruptedImage(err)
+ }
+ r.read += rsize
+ }
+
+	// If there are no more entries in the current directory, dequeue it.
+ if r.read >= f.ExtentLengthBE {
+ r.read = 0
+ r.sector = 0
+ r.queue.Dequeue()
+ }
+
+ // End of directory listing, drecord is empty so we don't bother
+ // to return it and keep iterating to look for the next actual
+ // directory or file.
+ if drecord.fileID == "" {
+ return r.Next()
+ }
+
+ parent := f.Name()
+ if parent == "\x00" {
+ parent = "/"
+ }
+ drecord.fileID = filepath.Join(parent, drecord.fileID)
+
+ if drecord.IsDir() {
+ r.queue.Enqueue(drecord)
+ } else {
+ drecord.image = r.image
+ }
+
+ return &drecord, nil
+}
+
+// unpackDRecord unpacks directory record bits into a Go struct.
+func (r *Reader) unpackDRecord(f *File) (byte, error) {
+ // Gets the directory record length
+ var len byte
+ if err := binary.Read(r.image, binary.BigEndian, &len); err != nil {
+ return len, ErrCorruptedImage(err)
+ }
+
+ if len == 0 {
+ return len + 1, io.EOF
+ }
+
+ // Reads directory record into Go struct
+ var drecord DirectoryRecord
+ if err := binary.Read(r.image, binary.BigEndian, &drecord); err != nil {
+ return len, ErrCorruptedImage(err)
+ }
+
+ f.DirectoryRecord = drecord
+ // Gets the name
+ name := make([]byte, drecord.FileIDLength)
+ if err := binary.Read(r.image, binary.BigEndian, name); err != nil {
+ return len, ErrCorruptedImage(err)
+ }
+ f.fileID = string(name)
+
+ // Padding field as per section 9.1.12 in ECMA-119
+ if (drecord.FileIDLength % 2) == 0 {
+ var zero byte
+ if err := binary.Read(r.image, binary.BigEndian, &zero); err != nil {
+ return len, ErrCorruptedImage(err)
+ }
+ }
+
+ // System use field as per section 9.1.13 in ECMA-119
+ // Directory record has 34 bytes in addition to the name's
+ // variable length and the padding field mentioned in section 9.1.12
+ totalLen := 34 + drecord.FileIDLength - (drecord.FileIDLength % 2)
+ sysUseLen := int64(len - totalLen)
+ if sysUseLen > 0 {
+ sysData := make([]byte, sysUseLen)
+ if err := binary.Read(r.image, binary.BigEndian, sysData); err != nil {
+ return len, ErrCorruptedImage(err)
+ }
+ }
+ return len, nil
+}
+
+// unpackPVD unpacks the Primary Volume Descriptor in three phases. This is
+// because the root directory record is a variable-length record and Go's binary
+// package doesn't support unpacking variable-length structs easily.
+func (r *Reader) unpackPVD() error {
+ // Unpack first half
+ var pvd1 PrimaryVolumePart1
+ if err := binary.Read(r.image, binary.BigEndian, &pvd1); err != nil {
+ return ErrCorruptedImage(err)
+ }
+ r.pvd.PrimaryVolumePart1 = pvd1
+
+ // Unpack root directory record
+ var drecord File
+ if _, err := r.unpackDRecord(&drecord); err != nil {
+ return ErrCorruptedImage(err)
+ }
+ r.pvd.DirectoryRecord = drecord.DirectoryRecord
+ r.queue.Enqueue(drecord)
+
+ // Unpack second half
+ var pvd2 PrimaryVolumePart2
+ if err := binary.Read(r.image, binary.BigEndian, &pvd2); err != nil {
+ return ErrCorruptedImage(err)
+ }
+ r.pvd.PrimaryVolumePart2 = pvd2
+
+ return nil
+}
diff --git a/vendor/github.com/hooklift/iso9660/writer.go b/vendor/github.com/hooklift/iso9660/writer.go
new file mode 100644
index 00000000..f23a9f7f
--- /dev/null
+++ b/vendor/github.com/hooklift/iso9660/writer.go
@@ -0,0 +1 @@
+package iso9660
diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE
new file mode 100644
index 00000000..68668029
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2013 Dario Castañé. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
new file mode 100644
index 00000000..b1310697
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/README.md
@@ -0,0 +1,141 @@
+# Mergo
+
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region Marche.
+
+![Mergo dall'alto](http://www.comune.mergo.an.it/Siti/Mergo/Immagini/Foto/mergo_dall_alto.jpg)
+
+## Status
+
+It is ready for production use and has held up well after extensive use in the wild.
+
+[![Build Status][1]][2]
+[![GoDoc][3]][4]
+[![GoCard][5]][6]
+
+[1]: https://travis-ci.org/imdario/mergo.png
+[2]: https://travis-ci.org/imdario/mergo
+[3]: https://godoc.org/github.com/imdario/mergo?status.svg
+[4]: https://godoc.org/github.com/imdario/mergo
+[5]: https://goreportcard.com/badge/imdario/mergo
+[6]: https://goreportcard.com/report/github.com/imdario/mergo
+
+### Important note
+
+Mergo is intended to assign **only** zero-value fields on the destination from the source value. It has worked like this since April 6th, 2015. Before that it didn't work properly, causing some random overwrites: after some issues and PRs I found it didn't merge as I designed it. Thanks to [imdario/mergo#8](https://github.com/imdario/mergo/pull/8), overwriting functions were added and the wrong behavior was clearly detected.
+
+If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
+
+### Mergo in the wild
+
+- [docker/docker](https://github.com/docker/docker/)
+- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+- [imdario/zas](https://github.com/imdario/zas)
+- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
+- [EagerIO/Stout](https://github.com/EagerIO/Stout)
+- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
+- [russross/canvasassignments](https://github.com/russross/canvasassignments)
+- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
+- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
+- [divshot/gitling](https://github.com/divshot/gitling)
+- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
+- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
+- [elwinar/rambler](https://github.com/elwinar/rambler)
+- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
+- [jfbus/impressionist](https://github.com/jfbus/impressionist)
+- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
+- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
+- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
+- [thoas/picfit](https://github.com/thoas/picfit)
+- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
+- [jnuthong/item_search](https://github.com/jnuthong/item_search)
+- [Iris Web Framework](https://github.com/kataras/iris)
+
+## Installation
+
+ go get github.com/imdario/mergo
+
+ // use in your .go code
+ import (
+ "github.com/imdario/mergo"
+ )
+
+## Usage
+
+You can only merge same-type structs whose exported fields are initialized to the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields, but it will recurse into any exported one. Maps are also merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
+
+```go
+if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+}
+```
+
+You can also merge while overwriting non-empty destination values using MergeWithOverwrite.
+
+```go
+if err := mergo.MergeWithOverwrite(&dst, src); err != nil {
+ // ...
+}
+```
+
+Additionally, you can map a map[string]interface{} to a struct (and vice versa, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
+
+```go
+if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+}
+```
+
+Warning: mapping a struct to a map is not recursive. Don't expect Mergo to map struct members of your struct as map[string]interface{}; they are simply assigned as values, as the sketch below shows.
+
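+A minimal sketch of that behavior (the `Inner` and `Outer` types are illustrative, not part of Mergo):
+
+```go
+// Inner and Outer are illustrative types for this sketch.
+type Inner struct {
+	Value int
+}
+
+type Outer struct {
+	Name  string
+	Inner Inner
+}
+
+dst := map[string]interface{}{}
+if err := mergo.Map(&dst, Outer{Name: "a", Inner: Inner{Value: 1}}); err != nil {
+	// ...
+}
+// dst["name"] == "a"
+// dst["inner"] holds an Inner value, not a nested map[string]interface{}.
+```
+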
+More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).
+
+### Nice example
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/imdario/mergo"
+)
+
+type Foo struct {
+ A string
+ B int64
+}
+
+func main() {
+ src := Foo{
+ A: "one",
+ B: 2,
+ }
+
+ dest := Foo{
+ A: "two",
+ }
+
+ mergo.Merge(&dest, src)
+
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
+}
+```
+
+Note: if tests are failing due to a missing package, please execute:
+
+ go get gopkg.in/yaml.v1
+
+## Contact me
+
+If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+
+## License
+
+[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go
new file mode 100644
index 00000000..6e9aa7ba
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/doc.go
@@ -0,0 +1,44 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mergo merges same-type structs and maps by setting default values in zero-value fields.
+
+Mergo won't merge unexported (private) fields, but it will recurse into any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Usage
+
+From my own work-in-progress project:
+
+ type networkConfig struct {
+ Protocol string
+ Address string
+		ServerType string `json:"server_type"`
+ Port uint16
+ }
+
+ type FssnConfig struct {
+ Network networkConfig
+ }
+
+ var fssnDefault = FssnConfig {
+ networkConfig {
+ "tcp",
+ "127.0.0.1",
+ "http",
+ 31560,
+ },
+ }
+
+ // Inside a function [...]
+
+ if err := mergo.Merge(&config, fssnDefault); err != nil {
+ log.Fatal(err)
+ }
+
+ // More code [...]
+
+*/
+package mergo
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
new file mode 100644
index 00000000..8e8c4ba8
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/map.go
@@ -0,0 +1,156 @@
+// Copyright 2014 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+func changeInitialCase(s string, mapper func(rune) rune) string {
+ if s == "" {
+ return s
+ }
+ r, n := utf8.DecodeRuneInString(s)
+ return string(mapper(r)) + s[n:]
+}
+
+func isExported(field reflect.StructField) bool {
+ r, _ := utf8.DecodeRuneInString(field.Name)
+ return r >= 'A' && r <= 'Z'
+}
+
+// deepMap recursively traverses both values, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) {
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+ zeroValue := reflect.Value{}
+ switch dst.Kind() {
+ case reflect.Map:
+ dstMap := dst.Interface().(map[string]interface{})
+ for i, n := 0, src.NumField(); i < n; i++ {
+ srcType := src.Type()
+ field := srcType.Field(i)
+ if !isExported(field) {
+ continue
+ }
+ fieldName := field.Name
+ fieldName = changeInitialCase(fieldName, unicode.ToLower)
+ if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
+ dstMap[fieldName] = src.Field(i).Interface()
+ }
+ }
+ case reflect.Struct:
+ srcMap := src.Interface().(map[string]interface{})
+ for key := range srcMap {
+ srcValue := srcMap[key]
+ fieldName := changeInitialCase(key, unicode.ToUpper)
+ dstElement := dst.FieldByName(fieldName)
+ if dstElement == zeroValue {
+ // We discard it because the field doesn't exist.
+ continue
+ }
+ srcElement := reflect.ValueOf(srcValue)
+ dstKind := dstElement.Kind()
+ srcKind := srcElement.Kind()
+ if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
+ srcElement = srcElement.Elem()
+ srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
+ } else if dstKind == reflect.Ptr {
+ // Can this work? I guess it can't.
+ if srcKind != reflect.Ptr && srcElement.CanAddr() {
+ srcPtr := srcElement.Addr()
+ srcElement = reflect.ValueOf(srcPtr)
+ srcKind = reflect.Ptr
+ }
+ }
+ if !srcElement.IsValid() {
+ continue
+ }
+ if srcKind == dstKind {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
+ return
+ }
+ } else {
+ if srcKind == reflect.Map {
+ if err = deepMap(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
+ return
+ }
+ } else {
+ return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+ }
+ }
+ }
+ }
+ return
+}
+
+// Map sets fields' values in dst from src.
+// src can be a map with string keys or a struct. dst must be the opposite:
+// if src is a map, dst must be a valid pointer to struct. If src is a struct,
+// dst must be map[string]interface{}.
+// It won't merge unexported (private) fields and will do recursively
+// any exported field.
+// If dst is a map, keys will be src fields' names in lower camel case.
+// A key in src that doesn't match a field in dst will be skipped. This
+// doesn't apply if dst is a map.
+// This is a separate method from Merge because it is cleaner and it keeps sane
+// semantics: merging equal types, mapping different (restricted) types.
+func Map(dst, src interface{}) error {
+ return _map(dst, src, false)
+}
+
+// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+func MapWithOverwrite(dst, src interface{}) error {
+ return _map(dst, src, true)
+}
+
+func _map(dst, src interface{}, overwrite bool) error {
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ // To be friction-less, we redirect equal-type arguments
+ // to deepMerge. Only because arguments can be anything.
+ if vSrc.Kind() == vDst.Kind() {
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite)
+ }
+ switch vSrc.Kind() {
+ case reflect.Struct:
+ if vDst.Kind() != reflect.Map {
+ return ErrExpectedMapAsDestination
+ }
+ case reflect.Map:
+ if vDst.Kind() != reflect.Struct {
+ return ErrExpectedStructAsDestination
+ }
+ default:
+ return ErrNotSupported
+ }
+ return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite)
+}
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
new file mode 100644
index 00000000..513774f4
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -0,0 +1,123 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "reflect"
+)
+
+// deepMerge recursively traverses both values, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) {
+ if !src.IsValid() {
+ return
+ }
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+ switch dst.Kind() {
+ case reflect.Struct:
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, overwrite); err != nil {
+ return
+ }
+ }
+ case reflect.Map:
+ for _, key := range src.MapKeys() {
+ srcElement := src.MapIndex(key)
+ if !srcElement.IsValid() {
+ continue
+ }
+ dstElement := dst.MapIndex(key)
+ switch srcElement.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice:
+ if srcElement.IsNil() {
+ continue
+ }
+ fallthrough
+ default:
+ if !srcElement.CanInterface() {
+ continue
+ }
+ switch reflect.TypeOf(srcElement.Interface()).Kind() {
+ case reflect.Struct:
+ fallthrough
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Map:
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
+ return
+ }
+ }
+ }
+ if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) {
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ dst.SetMapIndex(key, srcElement)
+ }
+ }
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Interface:
+ if src.IsNil() {
+ break
+ } else if dst.IsNil() || overwrite {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+ dst.Set(src)
+ }
+ } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, overwrite); err != nil {
+ return
+ }
+ default:
+ if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
+ dst.Set(src)
+ }
+ }
+ return
+}
+
+// Merge will fill any empty value-type attributes on the dst struct using corresponding
+// src attributes if they themselves are not empty. dst and src must be valid same-type structs
+// and dst must be a pointer to struct.
+// It won't merge unexported (private) fields and will do recursively any exported field.
+func Merge(dst, src interface{}) error {
+ return merge(dst, src, false)
+}
+
+// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+func MergeWithOverwrite(dst, src interface{}) error {
+ return merge(dst, src, true)
+}
+
+func merge(dst, src interface{}, overwrite bool) error {
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ if vDst.Type() != vSrc.Type() {
+ return ErrDifferentArgumentsTypes
+ }
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite)
+}
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
new file mode 100644
index 00000000..f8a0991e
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/mergo.go
@@ -0,0 +1,90 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "errors"
+ "reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+var (
+ ErrNilArguments = errors.New("src and dst must not be nil")
+ ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
+ ErrNotSupported = errors.New("only structs and maps are supported")
+ ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
+ ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+)
+
+// During deepMerge, we must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it re-encounters them.
+// Visited values are stored in a map indexed by 17 * address.
+type visit struct {
+ ptr uintptr
+ typ reflect.Type
+ next *visit
+}
+
+// From src/pkg/encoding/json.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
+ if dst == nil || src == nil {
+ err = ErrNilArguments
+ return
+ }
+ vDst = reflect.ValueOf(dst).Elem()
+ if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
+ err = ErrNotSupported
+ return
+ }
+ vSrc = reflect.ValueOf(src)
+ // We check if vSrc is a pointer to dereference it.
+ if vSrc.Kind() == reflect.Ptr {
+ vSrc = vSrc.Elem()
+ }
+ return
+}
+
+// deeper recursively traverses both values, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+ return // TODO refactor
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644
index 00000000..b03310a9
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md
new file mode 100644
index 00000000..187ef676
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/README.md
@@ -0,0 +1,7 @@
+# go-jmespath - A JMESPath implementation in Go
+
+[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
+
+
+
+See http://jmespath.org for more info.
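+
+Below is a minimal usage sketch, based on the `Search` API in this package (the JSON document and expression are illustrative):
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/jmespath/go-jmespath"
+)
+
+func main() {
+	// The document and the "foo.bar" expression are illustrative.
+	var data interface{}
+	if err := json.Unmarshal([]byte(`{"foo": {"bar": "baz"}}`), &data); err != nil {
+		panic(err)
+	}
+
+	// Search parses and evaluates the expression in one step; use
+	// Compile or MustCompile to reuse a parsed expression.
+	result, err := jmespath.Search("foo.bar", data)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(result) // baz
+}
+```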
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
new file mode 100644
index 00000000..9cfa988b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+ ast ASTNode
+ intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+ parser := NewParser()
+ ast, err := parser.Parse(expression)
+ if err != nil {
+ return nil, err
+ }
+ jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+ return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+ jmespath, err := Compile(expression)
+ if err != nil {
+ panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+ }
+ return jmespath
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+ return jp.intr.Execute(jp.ast, data)
+}
+
+// Search compiles the given JMESPath expression and evaluates it against the
+// input data in one step, returning the result.
+func Search(expression string, data interface{}) (interface{}, error) {
+ intr := newInterpreter()
+ parser := NewParser()
+ ast, err := parser.Parse(expression)
+ if err != nil {
+ return nil, err
+ }
+ return intr.Execute(ast, data)
+}
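+
+// Example (editor's illustration of the API above):
+//
+//	jp := MustCompile("foo.bar")
+//	result, err := jp.Search(map[string]interface{}{
+//		"foo": map[string]interface{}{"bar": "baz"},
+//	})
+//	// result == "baz", err == nil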
diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
new file mode 100644
index 00000000..1cd2d239
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type astNodeType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection"
+
+var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307}
+
+func (i astNodeType) String() string {
+ if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) {
+ return fmt.Sprintf("astNodeType(%d)", i)
+ }
+ return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go
new file mode 100644
index 00000000..9b7cd89b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/functions.go
@@ -0,0 +1,842 @@
+package jmespath
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type jpFunction func(arguments []interface{}) (interface{}, error)
+
+type jpType string
+
+const (
+ jpUnknown jpType = "unknown"
+ jpNumber jpType = "number"
+ jpString jpType = "string"
+ jpArray jpType = "array"
+ jpObject jpType = "object"
+ jpArrayNumber jpType = "array[number]"
+ jpArrayString jpType = "array[string]"
+ jpExpref jpType = "expref"
+ jpAny jpType = "any"
+)
+
+type functionEntry struct {
+ name string
+ arguments []argSpec
+ handler jpFunction
+ hasExpRef bool
+}
+
+type argSpec struct {
+ types []jpType
+ variadic bool
+}
+
+type byExprString struct {
+ intr *treeInterpreter
+ node ASTNode
+ items []interface{}
+ hasError bool
+}
+
+func (a *byExprString) Len() int {
+ return len(a.items)
+}
+func (a *byExprString) Swap(i, j int) {
+ a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprString) Less(i, j int) bool {
+ first, err := a.intr.Execute(a.node, a.items[i])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ ith, ok := first.(string)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ second, err := a.intr.Execute(a.node, a.items[j])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ jth, ok := second.(string)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ return ith < jth
+}
+
+type byExprFloat struct {
+ intr *treeInterpreter
+ node ASTNode
+ items []interface{}
+ hasError bool
+}
+
+func (a *byExprFloat) Len() int {
+ return len(a.items)
+}
+func (a *byExprFloat) Swap(i, j int) {
+ a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprFloat) Less(i, j int) bool {
+ first, err := a.intr.Execute(a.node, a.items[i])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ ith, ok := first.(float64)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ second, err := a.intr.Execute(a.node, a.items[j])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ jth, ok := second.(float64)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ return ith < jth
+}
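+
+// Example (editor's note): these adapters exist because sort.Interface's
+// Less cannot return an error. jpfSortBy drives them as
+//
+//	sortable := &byExprFloat{intr, node, arr, false}
+//	sort.Stable(sortable)
+//	if sortable.hasError {
+//		// a comparison failed mid-sort; report it rather than return data
+//	}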
+
+type functionCaller struct {
+ functionTable map[string]functionEntry
+}
+
+func newFunctionCaller() *functionCaller {
+ caller := &functionCaller{}
+ caller.functionTable = map[string]functionEntry{
+ "length": {
+ name: "length",
+ arguments: []argSpec{
+ {types: []jpType{jpString, jpArray, jpObject}},
+ },
+ handler: jpfLength,
+ },
+ "starts_with": {
+ name: "starts_with",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpString}},
+ },
+ handler: jpfStartsWith,
+ },
+ "abs": {
+ name: "abs",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfAbs,
+ },
+ "avg": {
+ name: "avg",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber}},
+ },
+ handler: jpfAvg,
+ },
+ "ceil": {
+ name: "ceil",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfCeil,
+ },
+ "contains": {
+ name: "contains",
+ arguments: []argSpec{
+ {types: []jpType{jpArray, jpString}},
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfContains,
+ },
+ "ends_with": {
+ name: "ends_with",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpString}},
+ },
+ handler: jpfEndsWith,
+ },
+ "floor": {
+ name: "floor",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfFloor,
+ },
+ "map": {
+ name: "amp",
+ arguments: []argSpec{
+ {types: []jpType{jpExpref}},
+ {types: []jpType{jpArray}},
+ },
+ handler: jpfMap,
+ hasExpRef: true,
+ },
+ "max": {
+ name: "max",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber, jpArrayString}},
+ },
+ handler: jpfMax,
+ },
+ "merge": {
+ name: "merge",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}, variadic: true},
+ },
+ handler: jpfMerge,
+ },
+ "max_by": {
+ name: "max_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfMaxBy,
+ hasExpRef: true,
+ },
+ "sum": {
+ name: "sum",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber}},
+ },
+ handler: jpfSum,
+ },
+ "min": {
+ name: "min",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber, jpArrayString}},
+ },
+ handler: jpfMin,
+ },
+ "min_by": {
+ name: "min_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfMinBy,
+ hasExpRef: true,
+ },
+ "type": {
+ name: "type",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfType,
+ },
+ "keys": {
+ name: "keys",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}},
+ },
+ handler: jpfKeys,
+ },
+ "values": {
+ name: "values",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}},
+ },
+ handler: jpfValues,
+ },
+ "sort": {
+ name: "sort",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayString, jpArrayNumber}},
+ },
+ handler: jpfSort,
+ },
+ "sort_by": {
+ name: "sort_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfSortBy,
+ hasExpRef: true,
+ },
+ "join": {
+ name: "join",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpArrayString}},
+ },
+ handler: jpfJoin,
+ },
+ "reverse": {
+ name: "reverse",
+ arguments: []argSpec{
+ {types: []jpType{jpArray, jpString}},
+ },
+ handler: jpfReverse,
+ },
+ "to_array": {
+ name: "to_array",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToArray,
+ },
+ "to_string": {
+ name: "to_string",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToString,
+ },
+ "to_number": {
+ name: "to_number",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToNumber,
+ },
+ "not_null": {
+ name: "not_null",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}, variadic: true},
+ },
+ handler: jpfNotNull,
+ },
+ }
+ return caller
+}
+
+func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) {
+ if len(e.arguments) == 0 {
+ return arguments, nil
+ }
+ if !e.arguments[len(e.arguments)-1].variadic {
+ if len(e.arguments) != len(arguments) {
+ return nil, errors.New("incorrect number of args")
+ }
+ for i, spec := range e.arguments {
+ userArg := arguments[i]
+ err := spec.typeCheck(userArg)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return arguments, nil
+ }
+ if len(arguments) < len(e.arguments) {
+ return nil, errors.New("Invalid arity.")
+ }
+ return arguments, nil
+}
+
+func (a *argSpec) typeCheck(arg interface{}) error {
+ for _, t := range a.types {
+ switch t {
+ case jpNumber:
+ if _, ok := arg.(float64); ok {
+ return nil
+ }
+ case jpString:
+ if _, ok := arg.(string); ok {
+ return nil
+ }
+ case jpArray:
+ if isSliceType(arg) {
+ return nil
+ }
+ case jpObject:
+ if _, ok := arg.(map[string]interface{}); ok {
+ return nil
+ }
+ case jpArrayNumber:
+ if _, ok := toArrayNum(arg); ok {
+ return nil
+ }
+ case jpArrayString:
+ if _, ok := toArrayStr(arg); ok {
+ return nil
+ }
+ case jpAny:
+ return nil
+ case jpExpref:
+ if _, ok := arg.(expRef); ok {
+ return nil
+ }
+ }
+ }
+ return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types)
+}
+
+func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) {
+ entry, ok := f.functionTable[name]
+ if !ok {
+ return nil, errors.New("unknown function: " + name)
+ }
+ resolvedArgs, err := entry.resolveArgs(arguments)
+ if err != nil {
+ return nil, err
+ }
+ if entry.hasExpRef {
+ var extra []interface{}
+ extra = append(extra, intr)
+ resolvedArgs = append(extra, resolvedArgs...)
+ }
+ return entry.handler(resolvedArgs)
+}
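+
+// Example (editor's illustration): dispatching a built-in through the table.
+//
+//	caller := newFunctionCaller()
+//	out, err := caller.CallFunction("length", []interface{}{"hello"}, newInterpreter())
+//	// out == float64(5), err == nil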
+
+func jpfAbs(arguments []interface{}) (interface{}, error) {
+ num := arguments[0].(float64)
+ return math.Abs(num), nil
+}
+
+func jpfLength(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if c, ok := arg.(string); ok {
+ return float64(utf8.RuneCountInString(c)), nil
+ } else if isSliceType(arg) {
+ v := reflect.ValueOf(arg)
+ return float64(v.Len()), nil
+ } else if c, ok := arg.(map[string]interface{}); ok {
+ return float64(len(c)), nil
+ }
+ return nil, errors.New("could not compute length()")
+}
+
+func jpfStartsWith(arguments []interface{}) (interface{}, error) {
+ search := arguments[0].(string)
+ prefix := arguments[1].(string)
+ return strings.HasPrefix(search, prefix), nil
+}
+
+func jpfAvg(arguments []interface{}) (interface{}, error) {
+ // We've already type checked the value so we can safely use
+ // type assertions.
+ args := arguments[0].([]interface{})
+ length := float64(len(args))
+ numerator := 0.0
+ for _, n := range args {
+ numerator += n.(float64)
+ }
+ return numerator / length, nil
+}
+func jpfCeil(arguments []interface{}) (interface{}, error) {
+ val := arguments[0].(float64)
+ return math.Ceil(val), nil
+}
+func jpfContains(arguments []interface{}) (interface{}, error) {
+ search := arguments[0]
+ el := arguments[1]
+ if searchStr, ok := search.(string); ok {
+ if elStr, ok := el.(string); ok {
+ return strings.Contains(searchStr, elStr), nil
+ }
+ return false, nil
+ }
+ // Otherwise this is a generic contains for []interface{}
+ general := search.([]interface{})
+ for _, item := range general {
+ if item == el {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+func jpfEndsWith(arguments []interface{}) (interface{}, error) {
+ search := arguments[0].(string)
+ suffix := arguments[1].(string)
+ return strings.HasSuffix(search, suffix), nil
+}
+func jpfFloor(arguments []interface{}) (interface{}, error) {
+ val := arguments[0].(float64)
+ return math.Floor(val), nil
+}
+func jpfMap(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ exp := arguments[1].(expRef)
+ node := exp.ref
+ arr := arguments[2].([]interface{})
+ mapped := make([]interface{}, 0, len(arr))
+ for _, value := range arr {
+ current, err := intr.Execute(node, value)
+ if err != nil {
+ return nil, err
+ }
+ mapped = append(mapped, current)
+ }
+ return mapped, nil
+}
+func jpfMax(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item > best {
+ best = item
+ }
+ }
+ return best, nil
+ }
+ // Otherwise we're dealing with a max() of strings.
+ items, _ := toArrayStr(arguments[0])
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item > best {
+ best = item
+ }
+ }
+ return best, nil
+}
+func jpfMerge(arguments []interface{}) (interface{}, error) {
+ final := make(map[string]interface{})
+ for _, m := range arguments {
+ mapped := m.(map[string]interface{})
+ for key, value := range mapped {
+ final[key] = value
+ }
+ }
+ return final, nil
+}
+func jpfMaxBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return nil, nil
+ } else if len(arr) == 1 {
+ return arr[0], nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ switch t := start.(type) {
+ case float64:
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(float64)
+ if !ok {
+ return nil, errors.New("invalid type, must be number")
+ }
+ if current > bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ case string:
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(string)
+ if !ok {
+ return nil, errors.New("invalid type, must be string")
+ }
+ if current > bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ default:
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfSum(arguments []interface{}) (interface{}, error) {
+ items, _ := toArrayNum(arguments[0])
+ sum := 0.0
+ for _, item := range items {
+ sum += item
+ }
+ return sum, nil
+}
+
+func jpfMin(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item < best {
+ best = item
+ }
+ }
+ return best, nil
+ }
+ items, _ := toArrayStr(arguments[0])
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item < best {
+ best = item
+ }
+ }
+ return best, nil
+}
+
+func jpfMinBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return nil, nil
+ } else if len(arr) == 1 {
+ return arr[0], nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ if t, ok := start.(float64); ok {
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(float64)
+ if !ok {
+ return nil, errors.New("invalid type, must be number")
+ }
+ if current < bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ } else if t, ok := start.(string); ok {
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(string)
+ if !ok {
+ return nil, errors.New("invalid type, must be string")
+ }
+ if current < bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ } else {
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfType(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if _, ok := arg.(float64); ok {
+ return "number", nil
+ }
+ if _, ok := arg.(string); ok {
+ return "string", nil
+ }
+ if _, ok := arg.([]interface{}); ok {
+ return "array", nil
+ }
+ if _, ok := arg.(map[string]interface{}); ok {
+ return "object", nil
+ }
+ if arg == nil {
+ return "null", nil
+ }
+ if _, ok := arg.(bool); ok {
+ return "boolean", nil
+ }
+ return nil, errors.New("unknown type")
+}
+func jpfKeys(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0].(map[string]interface{})
+ collected := make([]interface{}, 0, len(arg))
+ for key := range arg {
+ collected = append(collected, key)
+ }
+ return collected, nil
+}
+func jpfValues(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0].(map[string]interface{})
+ collected := make([]interface{}, 0, len(arg))
+ for _, value := range arg {
+ collected = append(collected, value)
+ }
+ return collected, nil
+}
+func jpfSort(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ d := sort.Float64Slice(items)
+ sort.Stable(d)
+ final := make([]interface{}, len(d))
+ for i, val := range d {
+ final[i] = val
+ }
+ return final, nil
+ }
+ // Otherwise we're dealing with sort()'ing strings.
+ items, _ := toArrayStr(arguments[0])
+ d := sort.StringSlice(items)
+ sort.Stable(d)
+ final := make([]interface{}, len(d))
+ for i, val := range d {
+ final[i] = val
+ }
+ return final, nil
+}
+func jpfSortBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return arr, nil
+ } else if len(arr) == 1 {
+ return arr, nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := start.(float64); ok {
+ sortable := &byExprFloat{intr, node, arr, false}
+ sort.Stable(sortable)
+ if sortable.hasError {
+ return nil, errors.New("error in sort_by comparison")
+ }
+ return arr, nil
+ } else if _, ok := start.(string); ok {
+ sortable := &byExprString{intr, node, arr, false}
+ sort.Stable(sortable)
+ if sortable.hasError {
+ return nil, errors.New("error in sort_by comparison")
+ }
+ return arr, nil
+ } else {
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfJoin(arguments []interface{}) (interface{}, error) {
+ sep := arguments[0].(string)
+ // We can't just do arguments[1].([]string), we have to
+ // manually convert each item to a string.
+ arrayStr := []string{}
+ for _, item := range arguments[1].([]interface{}) {
+ arrayStr = append(arrayStr, item.(string))
+ }
+ return strings.Join(arrayStr, sep), nil
+}
+func jpfReverse(arguments []interface{}) (interface{}, error) {
+ if s, ok := arguments[0].(string); ok {
+ r := []rune(s)
+ for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
+ r[i], r[j] = r[j], r[i]
+ }
+ return string(r), nil
+ }
+ items := arguments[0].([]interface{})
+ length := len(items)
+ reversed := make([]interface{}, length)
+ for i, item := range items {
+ reversed[length-(i+1)] = item
+ }
+ return reversed, nil
+}
+func jpfToArray(arguments []interface{}) (interface{}, error) {
+ if _, ok := arguments[0].([]interface{}); ok {
+ return arguments[0], nil
+ }
+ // Wrap the single argument in a one-element slice; the full slice
+ // expression caps the capacity so callers cannot append into arguments.
+ return arguments[:1:1], nil
+}
+func jpfToString(arguments []interface{}) (interface{}, error) {
+ if v, ok := arguments[0].(string); ok {
+ return v, nil
+ }
+ result, err := json.Marshal(arguments[0])
+ if err != nil {
+ return nil, err
+ }
+ return string(result), nil
+}
+func jpfToNumber(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if v, ok := arg.(float64); ok {
+ return v, nil
+ }
+ if v, ok := arg.(string); ok {
+ conv, err := strconv.ParseFloat(v, 64)
+ if err != nil {
+ return nil, nil
+ }
+ return conv, nil
+ }
+ if _, ok := arg.([]interface{}); ok {
+ return nil, nil
+ }
+ if _, ok := arg.(map[string]interface{}); ok {
+ return nil, nil
+ }
+ if arg == nil {
+ return nil, nil
+ }
+ if _, ok := arg.(bool); ok {
+ return nil, nil
+ }
+ return nil, errors.New("unknown type")
+}
+func jpfNotNull(arguments []interface{}) (interface{}, error) {
+ for _, arg := range arguments {
+ if arg != nil {
+ return arg, nil
+ }
+ }
+ return nil, nil
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go
new file mode 100644
index 00000000..13c74604
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go
@@ -0,0 +1,418 @@
+package jmespath
+
+import (
+ "errors"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+/* This is a tree-based interpreter. It walks the AST and directly
+ interprets it to search through a JSON document.
+*/
+
+type treeInterpreter struct {
+ fCall *functionCaller
+}
+
+func newInterpreter() *treeInterpreter {
+ interpreter := treeInterpreter{}
+ interpreter.fCall = newFunctionCaller()
+ return &interpreter
+}
+
+type expRef struct {
+ ref ASTNode
+}
+
+// Execute takes an ASTNode and input data and interprets the AST directly.
+// It will produce the result of applying the JMESPath expression associated
+// with the ASTNode to the input data "value".
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) {
+ switch node.nodeType {
+ case ASTComparator:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ right, err := intr.Execute(node.children[1], value)
+ if err != nil {
+ return nil, err
+ }
+ switch node.value {
+ case tEQ:
+ return objsEqual(left, right), nil
+ case tNE:
+ return !objsEqual(left, right), nil
+ }
+ leftNum, ok := left.(float64)
+ if !ok {
+ return nil, nil
+ }
+ rightNum, ok := right.(float64)
+ if !ok {
+ return nil, nil
+ }
+ switch node.value {
+ case tGT:
+ return leftNum > rightNum, nil
+ case tGTE:
+ return leftNum >= rightNum, nil
+ case tLT:
+ return leftNum < rightNum, nil
+ case tLTE:
+ return leftNum <= rightNum, nil
+ }
+ case ASTExpRef:
+ return expRef{ref: node.children[0]}, nil
+ case ASTFunctionExpression:
+ resolvedArgs := []interface{}{}
+ for _, arg := range node.children {
+ current, err := intr.Execute(arg, value)
+ if err != nil {
+ return nil, err
+ }
+ resolvedArgs = append(resolvedArgs, current)
+ }
+ return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr)
+ case ASTField:
+ if m, ok := value.(map[string]interface{}); ok {
+ key := node.value.(string)
+ return m[key], nil
+ }
+ return intr.fieldFromStruct(node.value.(string), value)
+ case ASTFilterProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ if isSliceType(left) {
+ return intr.filterProjectionWithReflection(node, left)
+ }
+ return nil, nil
+ }
+ compareNode := node.children[2]
+ collected := []interface{}{}
+ for _, element := range sliceType {
+ result, err := intr.Execute(compareNode, element)
+ if err != nil {
+ return nil, err
+ }
+ if !isFalse(result) {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ }
+ return collected, nil
+ case ASTFlatten:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ // If we can't type convert to []interface{}, there's
+ // a chance this could still work via reflection if we're
+ // dealing with user provided types.
+ if isSliceType(left) {
+ return intr.flattenWithReflection(left)
+ }
+ return nil, nil
+ }
+ flattened := []interface{}{}
+ for _, element := range sliceType {
+ if elementSlice, ok := element.([]interface{}); ok {
+ flattened = append(flattened, elementSlice...)
+ } else if isSliceType(element) {
+ reflectFlat := []interface{}{}
+ v := reflect.ValueOf(element)
+ for i := 0; i < v.Len(); i++ {
+ reflectFlat = append(reflectFlat, v.Index(i).Interface())
+ }
+ flattened = append(flattened, reflectFlat...)
+ } else {
+ flattened = append(flattened, element)
+ }
+ }
+ return flattened, nil
+ case ASTIdentity, ASTCurrentNode:
+ return value, nil
+ case ASTIndex:
+ if sliceType, ok := value.([]interface{}); ok {
+ index := node.value.(int)
+ if index < 0 {
+ index += len(sliceType)
+ }
+ if index < len(sliceType) && index >= 0 {
+ return sliceType[index], nil
+ }
+ return nil, nil
+ }
+ // Otherwise try via reflection.
+ rv := reflect.ValueOf(value)
+ if rv.Kind() == reflect.Slice {
+ index := node.value.(int)
+ if index < 0 {
+ index += rv.Len()
+ }
+ if index < rv.Len() && index >= 0 {
+ v := rv.Index(index)
+ return v.Interface(), nil
+ }
+ }
+ return nil, nil
+ case ASTKeyValPair:
+ return intr.Execute(node.children[0], value)
+ case ASTLiteral:
+ return node.value, nil
+ case ASTMultiSelectHash:
+ if value == nil {
+ return nil, nil
+ }
+ collected := make(map[string]interface{})
+ for _, child := range node.children {
+ current, err := intr.Execute(child, value)
+ if err != nil {
+ return nil, err
+ }
+ key := child.value.(string)
+ collected[key] = current
+ }
+ return collected, nil
+ case ASTMultiSelectList:
+ if value == nil {
+ return nil, nil
+ }
+ collected := []interface{}{}
+ for _, child := range node.children {
+ current, err := intr.Execute(child, value)
+ if err != nil {
+ return nil, err
+ }
+ collected = append(collected, current)
+ }
+ return collected, nil
+ case ASTOrExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ matched, err = intr.Execute(node.children[1], value)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return matched, nil
+ case ASTAndExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ return matched, nil
+ }
+ return intr.Execute(node.children[1], value)
+ case ASTNotExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ return true, nil
+ }
+ return false, nil
+ case ASTPipe:
+ result := value
+ var err error
+ for _, child := range node.children {
+ result, err = intr.Execute(child, result)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return result, nil
+ case ASTProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ if isSliceType(left) {
+ return intr.projectWithReflection(node, left)
+ }
+ return nil, nil
+ }
+ collected := []interface{}{}
+ var current interface{}
+ for _, element := range sliceType {
+ current, err = intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ return collected, nil
+ case ASTSubexpression, ASTIndexExpression:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ return intr.Execute(node.children[1], left)
+ case ASTSlice:
+ sliceType, ok := value.([]interface{})
+ if !ok {
+ if isSliceType(value) {
+ return intr.sliceWithReflection(node, value)
+ }
+ return nil, nil
+ }
+ parts := node.value.([]*int)
+ sliceParams := make([]sliceParam, 3)
+ for i, part := range parts {
+ if part != nil {
+ sliceParams[i].Specified = true
+ sliceParams[i].N = *part
+ }
+ }
+ return slice(sliceType, sliceParams)
+ case ASTValueProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ mapType, ok := left.(map[string]interface{})
+ if !ok {
+ return nil, nil
+ }
+ values := make([]interface{}, 0, len(mapType))
+ for _, value := range mapType {
+ values = append(values, value)
+ }
+ collected := []interface{}{}
+ for _, element := range values {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ return collected, nil
+ }
+ return nil, errors.New("Unknown AST node: " + node.nodeType.String())
+}
+
+func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) {
+ rv := reflect.ValueOf(value)
+ first, n := utf8.DecodeRuneInString(key)
+ fieldName := string(unicode.ToUpper(first)) + key[n:]
+ if rv.Kind() == reflect.Struct {
+ v := rv.FieldByName(fieldName)
+ if !v.IsValid() {
+ return nil, nil
+ }
+ return v.Interface(), nil
+ } else if rv.Kind() == reflect.Ptr {
+ // Handle multiple levels of indirection?
+ if rv.IsNil() {
+ return nil, nil
+ }
+ rv = rv.Elem()
+ v := rv.FieldByName(fieldName)
+ if !v.IsValid() {
+ return nil, nil
+ }
+ return v.Interface(), nil
+ }
+ return nil, nil
+}
+
+func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) {
+ v := reflect.ValueOf(value)
+ flattened := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ if reflect.TypeOf(element).Kind() == reflect.Slice {
+ // Then insert the contents of the element
+ // slice into the flattened slice,
+ // i.e flattened = append(flattened, mySlice...)
+ elementV := reflect.ValueOf(element)
+ for j := 0; j < elementV.Len(); j++ {
+ flattened = append(
+ flattened, elementV.Index(j).Interface())
+ }
+ } else {
+ flattened = append(flattened, element)
+ }
+ }
+ return flattened, nil
+}
+
+func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ v := reflect.ValueOf(value)
+ parts := node.value.([]*int)
+ sliceParams := make([]sliceParam, 3)
+ for i, part := range parts {
+ if part != nil {
+ sliceParams[i].Specified = true
+ sliceParams[i].N = *part
+ }
+ }
+ final := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ final = append(final, element)
+ }
+ return slice(final, sliceParams)
+}
+
+func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ compareNode := node.children[2]
+ collected := []interface{}{}
+ v := reflect.ValueOf(value)
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ result, err := intr.Execute(compareNode, element)
+ if err != nil {
+ return nil, err
+ }
+ if !isFalse(result) {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ }
+ return collected, nil
+}
+
+func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ collected := []interface{}{}
+ v := reflect.ValueOf(value)
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ result, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if result != nil {
+ collected = append(collected, result)
+ }
+ }
+ return collected, nil
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go
new file mode 100644
index 00000000..817900c8
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/lexer.go
@@ -0,0 +1,420 @@
+package jmespath
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type token struct {
+ tokenType tokType
+ value string
+ position int
+ length int
+}
+
+type tokType int
+
+const eof = -1
+
+// Lexer contains information about the expression being tokenized.
+type Lexer struct {
+ expression string // The expression provided by the user.
+ currentPos int // The current position in the string.
+ lastWidth int // The width of the current rune.
+ buf bytes.Buffer // Internal buffer used for building up values.
+}
+
+// SyntaxError is the main error used whenever a lexing or parsing error occurs.
+type SyntaxError struct {
+ msg string // Error message displayed to user
+ Expression string // Expression that generated a SyntaxError
+ Offset int // The location in the string where the error occurred
+}
+
+func (e SyntaxError) Error() string {
+ // In the future, it would be good to underline the specific
+ // location where the error occurred.
+ return "SyntaxError: " + e.msg
+}
+
+// HighlightLocation will show where the syntax error occurred.
+// It will place a "^" character on a line below the expression
+// at the point where the syntax error occurred.
+func (e SyntaxError) HighlightLocation() string {
+ return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
+}
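+
+// Example (editor's illustration): for Expression "foo..bar" and Offset 4,
+// HighlightLocation returns
+//
+//	foo..bar
+//	    ^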
+
+//go:generate stringer -type=tokType
+const (
+ tUnknown tokType = iota
+ tStar
+ tDot
+ tFilter
+ tFlatten
+ tLparen
+ tRparen
+ tLbracket
+ tRbracket
+ tLbrace
+ tRbrace
+ tOr
+ tPipe
+ tNumber
+ tUnquotedIdentifier
+ tQuotedIdentifier
+ tComma
+ tColon
+ tLT
+ tLTE
+ tGT
+ tGTE
+ tEQ
+ tNE
+ tJSONLiteral
+ tStringLiteral
+ tCurrent
+ tExpref
+ tAnd
+ tNot
+ tEOF
+)
+
+var basicTokens = map[rune]tokType{
+ '.': tDot,
+ '*': tStar,
+ ',': tComma,
+ ':': tColon,
+ '{': tLbrace,
+ '}': tRbrace,
+ ']': tRbracket, // tLbracket not included because it could be "[]"
+ '(': tLparen,
+ ')': tRparen,
+ '@': tCurrent,
+}
+
+// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
+// When using this bitmask just be sure to shift the rune down 64 bits
+// before checking against identifierStartBits.
+const identifierStartBits uint64 = 576460745995190270
+
+// Bit mask for [0-9A-Za-z_], 128 bits -> 2 uint64s.
+var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
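+
+// Worked example (editor's note): identifierStartBits is
+// 0x07FFFFFE87FFFFFE. For r = 'a' (97), 97-64 = 33 and bit 33 is set, so
+// 'a' may start an identifier; '0' (48) is below 64 and never matches the
+// start mask, but bit 48 of identifierTrailingBits[0] is set, so digits
+// may appear after the first character.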
+
+var whiteSpace = map[rune]bool{
+ ' ': true, '\t': true, '\n': true, '\r': true,
+}
+
+func (t token) String() string {
+ return fmt.Sprintf("Token{%+v, %s, %d, %d}",
+ t.tokenType, t.value, t.position, t.length)
+}
+
+// NewLexer creates a new JMESPath lexer.
+func NewLexer() *Lexer {
+ lexer := Lexer{}
+ return &lexer
+}
+
+func (lexer *Lexer) next() rune {
+ if lexer.currentPos >= len(lexer.expression) {
+ lexer.lastWidth = 0
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
+ lexer.lastWidth = w
+ lexer.currentPos += w
+ return r
+}
+
+func (lexer *Lexer) back() {
+ lexer.currentPos -= lexer.lastWidth
+}
+
+func (lexer *Lexer) peek() rune {
+ t := lexer.next()
+ lexer.back()
+ return t
+}
+
+// tokenize takes an expression and returns corresponding tokens.
+func (lexer *Lexer) tokenize(expression string) ([]token, error) {
+ var tokens []token
+ lexer.expression = expression
+ lexer.currentPos = 0
+ lexer.lastWidth = 0
+loop:
+ for {
+ r := lexer.next()
+ if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
+ t := lexer.consumeUnquotedIdentifier()
+ tokens = append(tokens, t)
+ } else if val, ok := basicTokens[r]; ok {
+ // Basic single char token.
+ t := token{
+ tokenType: val,
+ value: string(r),
+ position: lexer.currentPos - lexer.lastWidth,
+ length: 1,
+ }
+ tokens = append(tokens, t)
+ } else if r == '-' || (r >= '0' && r <= '9') {
+ t := lexer.consumeNumber()
+ tokens = append(tokens, t)
+ } else if r == '[' {
+ t := lexer.consumeLBracket()
+ tokens = append(tokens, t)
+ } else if r == '"' {
+ t, err := lexer.consumeQuotedIdentifier()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '\'' {
+ t, err := lexer.consumeRawStringLiteral()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '`' {
+ t, err := lexer.consumeLiteral()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '|' {
+ t := lexer.matchOrElse(r, '|', tOr, tPipe)
+ tokens = append(tokens, t)
+ } else if r == '<' {
+ t := lexer.matchOrElse(r, '=', tLTE, tLT)
+ tokens = append(tokens, t)
+ } else if r == '>' {
+ t := lexer.matchOrElse(r, '=', tGTE, tGT)
+ tokens = append(tokens, t)
+ } else if r == '!' {
+ t := lexer.matchOrElse(r, '=', tNE, tNot)
+ tokens = append(tokens, t)
+ } else if r == '=' {
+ t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
+ tokens = append(tokens, t)
+ } else if r == '&' {
+ t := lexer.matchOrElse(r, '&', tAnd, tExpref)
+ tokens = append(tokens, t)
+ } else if r == eof {
+ break loop
+ } else if _, ok := whiteSpace[r]; ok {
+ // Ignore whitespace
+ } else {
+ return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
+ }
+ }
+ tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
+ return tokens, nil
+}
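+
+// Example (editor's illustration): tokenize("foo[0]") produces
+//
+//	tUnquotedIdentifier("foo") tLbracket("[") tNumber("0") tRbracket("]") tEOF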
+
+// Consume characters until the ending rune "end" is reached.
+// If the end of the expression is reached before seeing the
+// terminating rune "end", then an error is returned.
+// If no error occurs then the matching substring is returned.
+// The returned string will not include the ending rune.
+func (lexer *Lexer) consumeUntil(end rune) (string, error) {
+ start := lexer.currentPos
+ current := lexer.next()
+ for current != end && current != eof {
+ if current == '\\' && lexer.peek() != eof {
+ lexer.next()
+ }
+ current = lexer.next()
+ }
+ if lexer.lastWidth == 0 {
+ // Then we hit an EOF so we never reached the closing
+ // delimiter.
+ return "", SyntaxError{
+ msg: "Unclosed delimiter: " + string(end),
+ Expression: lexer.expression,
+ Offset: len(lexer.expression),
+ }
+ }
+ return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
+}
+
+func (lexer *Lexer) consumeLiteral() (token, error) {
+ start := lexer.currentPos
+ value, err := lexer.consumeUntil('`')
+ if err != nil {
+ return token{}, err
+ }
+ value = strings.Replace(value, "\\`", "`", -1)
+ return token{
+ tokenType: tJSONLiteral,
+ value: value,
+ position: start,
+ length: len(value),
+ }, nil
+}
+
+func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
+ start := lexer.currentPos
+ currentIndex := start
+ current := lexer.next()
+ for current != '\'' && lexer.peek() != eof {
+ if current == '\\' && lexer.peek() == '\'' {
+ chunk := lexer.expression[currentIndex : lexer.currentPos-1]
+ lexer.buf.WriteString(chunk)
+ lexer.buf.WriteString("'")
+ lexer.next()
+ currentIndex = lexer.currentPos
+ }
+ current = lexer.next()
+ }
+ if lexer.lastWidth == 0 {
+ // Then we hit an EOF so we never reached the closing
+ // delimiter.
+ return token{}, SyntaxError{
+ msg: "Unclosed delimiter: '",
+ Expression: lexer.expression,
+ Offset: len(lexer.expression),
+ }
+ }
+ if currentIndex < lexer.currentPos {
+ lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
+ }
+ value := lexer.buf.String()
+ // Reset the buffer so it can be reused.
+ lexer.buf.Reset()
+ return token{
+ tokenType: tStringLiteral,
+ value: value,
+ position: start,
+ length: len(value),
+ }, nil
+}
+
+func (lexer *Lexer) syntaxError(msg string) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: lexer.expression,
+ Offset: lexer.currentPos - 1,
+ }
+}
+
+// Checks for a two char token, otherwise matches a single character
+// token. This is used whenever a two char token overlaps a single
+// char token, e.g. "||" -> tPipe, "|" -> tOr.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
+ start := lexer.currentPos - lexer.lastWidth
+ nextRune := lexer.next()
+ var t token
+ if nextRune == second {
+ t = token{
+ tokenType: matchedType,
+ value: string(first) + string(second),
+ position: start,
+ length: 2,
+ }
+ } else {
+ lexer.back()
+ t = token{
+ tokenType: singleCharType,
+ value: string(first),
+ position: start,
+ length: 1,
+ }
+ }
+ return t
+}
+
+func (lexer *Lexer) consumeLBracket() token {
+ // There's three options here:
+ // 1. A filter expression "[?"
+ // 2. A flatten operator "[]"
+ // 3. A bare rbracket "["
+ start := lexer.currentPos - lexer.lastWidth
+ nextRune := lexer.next()
+ var t token
+ if nextRune == '?' {
+ t = token{
+ tokenType: tFilter,
+ value: "[?",
+ position: start,
+ length: 2,
+ }
+ } else if nextRune == ']' {
+ t = token{
+ tokenType: tFlatten,
+ value: "[]",
+ position: start,
+ length: 2,
+ }
+ } else {
+ t = token{
+ tokenType: tLbracket,
+ value: "[",
+ position: start,
+ length: 1,
+ }
+ lexer.back()
+ }
+ return t
+}
+
+func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
+ start := lexer.currentPos
+ value, err := lexer.consumeUntil('"')
+ if err != nil {
+ return token{}, err
+ }
+ var decoded string
+ asJSON := []byte("\"" + value + "\"")
+ if err := json.Unmarshal(asJSON, &decoded); err != nil {
+ return token{}, err
+ }
+ return token{
+ tokenType: tQuotedIdentifier,
+ value: decoded,
+ position: start - 1,
+ length: len(decoded),
+ }, nil
+}
+
+func (lexer *Lexer) consumeUnquotedIdentifier() token {
+ // Consume runes until we reach the end of an unquoted
+ // identifier.
+ start := lexer.currentPos - lexer.lastWidth
+ for {
+ r := lexer.next()
+ if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
+ lexer.back()
+ break
+ }
+ }
+ value := lexer.expression[start:lexer.currentPos]
+ return token{
+ tokenType: tUnquotedIdentifier,
+ value: value,
+ position: start,
+ length: lexer.currentPos - start,
+ }
+}
+
+func (lexer *Lexer) consumeNumber() token {
+ // Consume runes until we reach something that's not a number.
+ start := lexer.currentPos - lexer.lastWidth
+ for {
+ r := lexer.next()
+ if r < '0' || r > '9' {
+ lexer.back()
+ break
+ }
+ }
+ value := lexer.expression[start:lexer.currentPos]
+ return token{
+ tokenType: tNumber,
+ value: value,
+ position: start,
+ length: lexer.currentPos - start,
+ }
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go
new file mode 100644
index 00000000..1240a175
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/parser.go
@@ -0,0 +1,603 @@
+package jmespath
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type astNodeType int
+
+//go:generate stringer -type astNodeType
+const (
+ ASTEmpty astNodeType = iota
+ ASTComparator
+ ASTCurrentNode
+ ASTExpRef
+ ASTFunctionExpression
+ ASTField
+ ASTFilterProjection
+ ASTFlatten
+ ASTIdentity
+ ASTIndex
+ ASTIndexExpression
+ ASTKeyValPair
+ ASTLiteral
+ ASTMultiSelectHash
+ ASTMultiSelectList
+ ASTOrExpression
+ ASTAndExpression
+ ASTNotExpression
+ ASTPipe
+ ASTProjection
+ ASTSubexpression
+ ASTSlice
+ ASTValueProjection
+)
+
+// ASTNode represents the abstract syntax tree of a JMESPath expression.
+type ASTNode struct {
+ nodeType astNodeType
+ value interface{}
+ children []ASTNode
+}
+
+func (node ASTNode) String() string {
+ return node.PrettyPrint(0)
+}
+
+// PrettyPrint will pretty print the parsed AST.
+// The AST is an implementation detail and this pretty print
+// function is provided as a convenience method to help with
+// debugging. You should not rely on its output as the internal
+// structure of the AST may change at any time.
+func (node ASTNode) PrettyPrint(indent int) string {
+ spaces := strings.Repeat(" ", indent)
+ output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType)
+ nextIndent := indent + 2
+ if node.value != nil {
+ if converted, ok := node.value.(fmt.Stringer); ok {
+ // Account for things like comparator nodes
+ // that are enums with a String() method.
+ output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String())
+ } else {
+ output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value)
+ }
+ }
+ lastIndex := len(node.children)
+ if lastIndex > 0 {
+ output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent))
+ childIndent := nextIndent + 2
+ for _, elem := range node.children {
+ output += elem.PrettyPrint(childIndent)
+ }
+ }
+ output += fmt.Sprintf("%s}\n", spaces)
+ return output
+}
+
+var bindingPowers = map[tokType]int{
+ tEOF: 0,
+ tUnquotedIdentifier: 0,
+ tQuotedIdentifier: 0,
+ tRbracket: 0,
+ tRparen: 0,
+ tComma: 0,
+ tRbrace: 0,
+ tNumber: 0,
+ tCurrent: 0,
+ tExpref: 0,
+ tColon: 0,
+ tPipe: 1,
+ tOr: 2,
+ tAnd: 3,
+ tEQ: 5,
+ tLT: 5,
+ tLTE: 5,
+ tGT: 5,
+ tGTE: 5,
+ tNE: 5,
+ tFlatten: 9,
+ tStar: 20,
+ tFilter: 21,
+ tDot: 40,
+ tNot: 45,
+ tLbrace: 50,
+ tLbracket: 55,
+ tLparen: 60,
+}
+
+// Parser holds state about the current expression being parsed.
+type Parser struct {
+ expression string
+ tokens []token
+ index int
+}
+
+// NewParser creates a new JMESPath parser.
+func NewParser() *Parser {
+ p := Parser{}
+ return &p
+}
+
+// Parse will compile a JMESPath expression.
+func (p *Parser) Parse(expression string) (ASTNode, error) {
+ lexer := NewLexer()
+ p.expression = expression
+ p.index = 0
+ tokens, err := lexer.tokenize(expression)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ p.tokens = tokens
+ parsed, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() != tEOF {
+ return ASTNode{}, p.syntaxError(fmt.Sprintf(
+ "Unexpected token at the end of the expresssion: %s", p.current()))
+ }
+ return parsed, nil
+}
+
+func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
+ var err error
+ leftToken := p.lookaheadToken(0)
+ p.advance()
+ leftNode, err := p.nud(leftToken)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ currentToken := p.current()
+ for bindingPower < bindingPowers[currentToken] {
+ p.advance()
+ leftNode, err = p.led(currentToken, leftNode)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ currentToken = p.current()
+ }
+ return leftNode, nil
+}
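+
+// Example (editor's note): the binding powers above implement operator
+// precedence. For "a || b && c", nud consumes "a"; tOr (power 2) then
+// starts the led loop with parseExpression(2); inside it tAnd (power 3)
+// binds tighter, so the result is Or(a, And(b, c)) rather than
+// And(Or(a, b), c).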
+
+func (p *Parser) parseIndexExpression() (ASTNode, error) {
+ if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
+ return p.parseSliceExpression()
+ }
+ indexStr := p.lookaheadToken(0).value
+ parsedInt, err := strconv.Atoi(indexStr)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
+ p.advance()
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return indexNode, nil
+}
+
+func (p *Parser) parseSliceExpression() (ASTNode, error) {
+ parts := []*int{nil, nil, nil}
+ index := 0
+ current := p.current()
+ for current != tRbracket && index < 3 {
+ if current == tColon {
+ index++
+ p.advance()
+ } else if current == tNumber {
+ parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ parts[index] = &parsedInt
+ p.advance()
+ } else {
+ return ASTNode{}, p.syntaxError(
+ "Expected tColon or tNumber" + ", received: " + p.current().String())
+ }
+ current = p.current()
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTSlice,
+ value: parts,
+ }, nil
+}
+
+func (p *Parser) match(tokenType tokType) error {
+ if p.current() == tokenType {
+ p.advance()
+ return nil
+ }
+ return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
+}
+
+func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
+ switch tokenType {
+ case tDot:
+ if p.current() != tStar {
+ right, err := p.parseDotRHS(bindingPowers[tDot])
+ return ASTNode{
+ nodeType: ASTSubexpression,
+ children: []ASTNode{node, right},
+ }, err
+ }
+ p.advance()
+ right, err := p.parseProjectionRHS(bindingPowers[tDot])
+ return ASTNode{
+ nodeType: ASTValueProjection,
+ children: []ASTNode{node, right},
+ }, err
+ case tPipe:
+ right, err := p.parseExpression(bindingPowers[tPipe])
+ return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
+ case tOr:
+ right, err := p.parseExpression(bindingPowers[tOr])
+ return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
+ case tAnd:
+ right, err := p.parseExpression(bindingPowers[tAnd])
+ return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
+ case tLparen:
+ name := node.value
+ var args []ASTNode
+ for p.current() != tRparen {
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() == tComma {
+ if err := p.match(tComma); err != nil {
+ return ASTNode{}, err
+ }
+ }
+ args = append(args, expression)
+ }
+ if err := p.match(tRparen); err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTFunctionExpression,
+ value: name,
+ children: args,
+ }, nil
+ case tFilter:
+ return p.parseFilter(node)
+ case tFlatten:
+ left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
+ right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{left, right},
+ }, err
+ case tEQ, tNE, tGT, tGTE, tLT, tLTE:
+ right, err := p.parseExpression(bindingPowers[tokenType])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTComparator,
+ value: tokenType,
+ children: []ASTNode{node, right},
+ }, nil
+ case tLbracket:
+ tokenType := p.current()
+ var right ASTNode
+ var err error
+ if tokenType == tNumber || tokenType == tColon {
+ right, err = p.parseIndexExpression()
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.projectIfSlice(node, right)
+ }
+ // Otherwise this is a projection.
+ if err := p.match(tStar); err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ right, err = p.parseProjectionRHS(bindingPowers[tStar])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{node, right},
+ }, nil
+ }
+ return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
+}
+
+func (p *Parser) nud(token token) (ASTNode, error) {
+ switch token.tokenType {
+ case tJSONLiteral:
+ var parsed interface{}
+ err := json.Unmarshal([]byte(token.value), &parsed)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
+ case tStringLiteral:
+ return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
+ case tUnquotedIdentifier:
+ return ASTNode{
+ nodeType: ASTField,
+ value: token.value,
+ }, nil
+ case tQuotedIdentifier:
+ node := ASTNode{nodeType: ASTField, value: token.value}
+ if p.current() == tLparen {
+ return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
+ }
+ return node, nil
+ case tStar:
+ left := ASTNode{nodeType: ASTIdentity}
+ var right ASTNode
+ var err error
+ if p.current() == tRbracket {
+ right = ASTNode{nodeType: ASTIdentity}
+ } else {
+ right, err = p.parseProjectionRHS(bindingPowers[tStar])
+ }
+ return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
+ case tFilter:
+ return p.parseFilter(ASTNode{nodeType: ASTIdentity})
+ case tLbrace:
+ return p.parseMultiSelectHash()
+ case tFlatten:
+ left := ASTNode{
+ nodeType: ASTFlatten,
+ children: []ASTNode{{nodeType: ASTIdentity}},
+ }
+ right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
+ case tLbracket:
+ tokenType := p.current()
+ if tokenType == tNumber || tokenType == tColon {
+ right, err := p.parseIndexExpression()
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
+ } else if tokenType == tStar && p.lookahead(1) == tRbracket {
+ p.advance()
+ p.advance()
+ right, err := p.parseProjectionRHS(bindingPowers[tStar])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{{nodeType: ASTIdentity}, right},
+ }, nil
+ } else {
+ return p.parseMultiSelectList()
+ }
+ case tCurrent:
+ return ASTNode{nodeType: ASTCurrentNode}, nil
+ case tExpref:
+ expression, err := p.parseExpression(bindingPowers[tExpref])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
+ case tNot:
+ expression, err := p.parseExpression(bindingPowers[tNot])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
+ case tLparen:
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRparen); err != nil {
+ return ASTNode{}, err
+ }
+ return expression, nil
+ case tEOF:
+ return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
+ }
+
+ return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
+}
+
+func (p *Parser) parseMultiSelectList() (ASTNode, error) {
+ var expressions []ASTNode
+ for {
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ expressions = append(expressions, expression)
+ if p.current() == tRbracket {
+ break
+ }
+ err = p.match(tComma)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ }
+ err := p.match(tRbracket)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTMultiSelectList,
+ children: expressions,
+ }, nil
+}
+
+func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
+ var children []ASTNode
+ for {
+ keyToken := p.lookaheadToken(0)
+ if err := p.match(tUnquotedIdentifier); err != nil {
+ if err := p.match(tQuotedIdentifier); err != nil {
+ return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
+ }
+ }
+ keyName := keyToken.value
+ err := p.match(tColon)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ value, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ node := ASTNode{
+ nodeType: ASTKeyValPair,
+ value: keyName,
+ children: []ASTNode{value},
+ }
+ children = append(children, node)
+ if p.current() == tComma {
+ err := p.match(tComma)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ } else if p.current() == tRbrace {
+ err := p.match(tRbrace)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ break
+ }
+ }
+ return ASTNode{
+ nodeType: ASTMultiSelectHash,
+ children: children,
+ }, nil
+}
+
+func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
+ indexExpr := ASTNode{
+ nodeType: ASTIndexExpression,
+ children: []ASTNode{left, right},
+ }
+ if right.nodeType == ASTSlice {
+ right, err := p.parseProjectionRHS(bindingPowers[tStar])
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{indexExpr, right},
+ }, err
+ }
+ return indexExpr, nil
+}
+func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
+ var right, condition ASTNode
+ var err error
+ condition, err = p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() == tFlatten {
+ right = ASTNode{nodeType: ASTIdentity}
+ } else {
+ right, err = p.parseProjectionRHS(bindingPowers[tFilter])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ }
+
+ return ASTNode{
+ nodeType: ASTFilterProjection,
+ children: []ASTNode{node, right, condition},
+ }, nil
+}
+
+func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
+ lookahead := p.current()
+ if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
+ return p.parseExpression(bindingPower)
+ } else if lookahead == tLbracket {
+ if err := p.match(tLbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseMultiSelectList()
+ } else if lookahead == tLbrace {
+ if err := p.match(tLbrace); err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseMultiSelectHash()
+ }
+ return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
+}
+
+func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
+ current := p.current()
+ if bindingPowers[current] < 10 {
+ return ASTNode{nodeType: ASTIdentity}, nil
+ } else if current == tLbracket {
+ return p.parseExpression(bindingPower)
+ } else if current == tFilter {
+ return p.parseExpression(bindingPower)
+ } else if current == tDot {
+ err := p.match(tDot)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseDotRHS(bindingPower)
+ } else {
+ return ASTNode{}, p.syntaxError("Error")
+ }
+}
+
+func (p *Parser) lookahead(number int) tokType {
+ return p.lookaheadToken(number).tokenType
+}
+
+func (p *Parser) current() tokType {
+ return p.lookahead(0)
+}
+
+func (p *Parser) lookaheadToken(number int) token {
+ return p.tokens[p.index+number]
+}
+
+func (p *Parser) advance() {
+ p.index++
+}
+
+func tokensOneOf(elements []tokType, token tokType) bool {
+ for _, elem := range elements {
+ if elem == token {
+ return true
+ }
+ }
+ return false
+}
+
+func (p *Parser) syntaxError(msg string) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: p.expression,
+ Offset: p.lookaheadToken(0).position,
+ }
+}
+
+// Create a SyntaxError based on the provided token.
+// This differs from syntaxError() which creates a SyntaxError
+// based on the current lookahead token.
+func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: p.expression,
+ Offset: t.position,
+ }
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
new file mode 100644
index 00000000..dae79cbd
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=tokType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
+
+var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
+
+func (i tokType) String() string {
+ if i < 0 || i >= tokType(len(_tokType_index)-1) {
+ return fmt.Sprintf("tokType(%d)", i)
+ }
+ return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go
new file mode 100644
index 00000000..ddc1b7d7
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/util.go
@@ -0,0 +1,185 @@
+package jmespath
+
+import (
+ "errors"
+ "reflect"
+)
+
+// isFalse determines whether an object is false based on the JMESPath spec.
+// JMESPath defines false values to be any of:
+// - An empty string, array, or hash.
+// - The boolean value false.
+// - nil
+func isFalse(value interface{}) bool {
+ switch v := value.(type) {
+ case bool:
+ return !v
+ case []interface{}:
+ return len(v) == 0
+ case map[string]interface{}:
+ return len(v) == 0
+ case string:
+ return len(v) == 0
+ case nil:
+ return true
+ }
+ // Try the reflection cases before returning false.
+ rv := reflect.ValueOf(value)
+ switch rv.Kind() {
+ case reflect.Struct:
+ // A struct type will never be false, even if
+ // all of its values are the zero type.
+ return false
+ case reflect.Slice, reflect.Map:
+ return rv.Len() == 0
+ case reflect.Ptr:
+ if rv.IsNil() {
+ return true
+ }
+ // If it's a pointer type, we'll try to deref the pointer
+ // and evaluate the pointer value for isFalse.
+ element := rv.Elem()
+ return isFalse(element.Interface())
+ }
+ return false
+}
+
+// objsEqual is a generic object equality check.
+// It takes two arbitrary objects and recursively determines
+// whether they are equal.
+func objsEqual(left interface{}, right interface{}) bool {
+ return reflect.DeepEqual(left, right)
+}
+
+// sliceParam refers to a single part of a slice.
+// A slice consists of a start, a stop, and a step, similar to
+// Python slices.
+type sliceParam struct {
+ N int
+ Specified bool
+}
+
+// slice supports the [start:stop:step] style of slicing defined by JMESPath.
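+// For example, slicing []interface{}{0, 1, 2, 3} with start=1, stop=3,
+// and step=1 returns []interface{}{1, 2}.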
+func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
+ computed, err := computeSliceParams(len(slice), parts)
+ if err != nil {
+ return nil, err
+ }
+ start, stop, step := computed[0], computed[1], computed[2]
+ result := []interface{}{}
+ if step > 0 {
+ for i := start; i < stop; i += step {
+ result = append(result, slice[i])
+ }
+ } else {
+ for i := start; i > stop; i += step {
+ result = append(result, slice[i])
+ }
+ }
+ return result, nil
+}
+
+func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
+ var start, stop, step int
+ if !parts[2].Specified {
+ step = 1
+ } else if parts[2].N == 0 {
+ return nil, errors.New("Invalid slice, step cannot be 0")
+ } else {
+ step = parts[2].N
+ }
+	stepValueNegative := step < 0
+
+ if !parts[0].Specified {
+ if stepValueNegative {
+ start = length - 1
+ } else {
+ start = 0
+ }
+ } else {
+ start = capSlice(length, parts[0].N, step)
+ }
+
+ if !parts[1].Specified {
+ if stepValueNegative {
+ stop = -1
+ } else {
+ stop = length
+ }
+ } else {
+ stop = capSlice(length, parts[1].N, step)
+ }
+ return []int{start, stop, step}, nil
+}
+
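+// capSlice clamps an index to the valid range for a slice of the given
+// length, translating negative indices to offsets from the end.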
+func capSlice(length int, actual int, step int) int {
+ if actual < 0 {
+ actual += length
+ if actual < 0 {
+ if step < 0 {
+ actual = -1
+ } else {
+ actual = 0
+ }
+ }
+ } else if actual >= length {
+ if step < 0 {
+ actual = length - 1
+ } else {
+ actual = length
+ }
+ }
+ return actual
+}
+
+// toArrayNum converts an empty interface type to a slice of float64.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false.
+func toArrayNum(data interface{}) ([]float64, bool) {
+ // Is there a better way to do this with reflect?
+ if d, ok := data.([]interface{}); ok {
+ result := make([]float64, len(d))
+ for i, el := range d {
+ item, ok := el.(float64)
+ if !ok {
+ return nil, false
+ }
+ result[i] = item
+ }
+ return result, true
+ }
+ return nil, false
+}
+
+// toArrayStr converts an empty interface type to a slice of strings.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false. If the input data could be entirely
+// converted, then the converted data, along with a second value of true,
+// will be returned.
+func toArrayStr(data interface{}) ([]string, bool) {
+ // Is there a better way to do this with reflect?
+ if d, ok := data.([]interface{}); ok {
+ result := make([]string, len(d))
+ for i, el := range d {
+ item, ok := el.(string)
+ if !ok {
+ return nil, false
+ }
+ result[i] = item
+ }
+ return result, true
+ }
+ return nil, false
+}
+
+func isSliceType(v interface{}) bool {
+ if v == nil {
+ return false
+ }
+ return reflect.TypeOf(v).Kind() == reflect.Slice
+}
diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE
new file mode 100644
index 00000000..22985159
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md
new file mode 100644
index 00000000..bcb8c8d2
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/README.md
@@ -0,0 +1,21 @@
+# copystructure
+
+copystructure is a Go library for deep copying values in Go.
+
+This allows you to copy Go values that may contain reference values
+such as maps, slices, or pointers, and copy their data as well instead
+of just their references.
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/copystructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure).
+
+The `Copy` function has examples associated with it there.
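+
+As a minimal sketch of typical usage:
+
+```go
+input := map[string]interface{}{
+	"name": "example",
+	"tags": []string{"a", "b"},
+}
+
+// Copy returns a deep copy, so mutating input afterwards does not
+// affect dup.
+dup, err := copystructure.Copy(input)
+if err != nil {
+	panic(err)
+}
+fmt.Printf("%#v\n", dup)
+```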
diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go
new file mode 100644
index 00000000..db6a6aa1
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/copier_time.go
@@ -0,0 +1,15 @@
+package copystructure
+
+import (
+ "reflect"
+ "time"
+)
+
+func init() {
+ Copiers[reflect.TypeOf(time.Time{})] = timeCopier
+}
+
+func timeCopier(v interface{}) (interface{}, error) {
+ // Just... copy it.
+ return v.(time.Time), nil
+}
diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go
new file mode 100644
index 00000000..ca658132
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/copystructure.go
@@ -0,0 +1,545 @@
+package copystructure
+
+import (
+ "errors"
+ "reflect"
+ "sync"
+
+ "github.com/mitchellh/reflectwalk"
+)
+
+// Copy returns a deep copy of v.
+func Copy(v interface{}) (interface{}, error) {
+ return Config{}.Copy(v)
+}
+
+// CopierFunc is a function that knows how to deep copy a specific type.
+// Register these globally with the Copiers variable.
+type CopierFunc func(interface{}) (interface{}, error)
+
+// Copiers is a map of types that behave specially when they are copied.
+// If a type is found in this map while deep copying, this function
+// will be called to copy it instead of attempting to copy all fields.
+//
+// The key should be the type, obtained using: reflect.TypeOf(value with type).
+//
+// It is unsafe to write to this map after Copies have started. If you
+// are writing to this map while also copying, wrap all modifications to
+// this map as well as to Copy in a mutex.
+var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc)
+
+// Must is a helper that wraps a call to a function returning
+// (interface{}, error) and panics if the error is non-nil. It is intended
+// for use in variable initializations and should only be used when a copy
+// error should be a crashing case.
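+//
+// For example (with a hypothetical baseTags value):
+//
+//	var defaultTags = Must(Copy(baseTags)).([]string)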
+func Must(v interface{}, err error) interface{} {
+ if err != nil {
+ panic("copy error: " + err.Error())
+ }
+
+ return v
+}
+
+var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true")
+
+type Config struct {
+ // Lock any types that are a sync.Locker and are not a mutex while copying.
+ // If there is an RLocker method, use that to get the sync.Locker.
+ Lock bool
+
+ // Copiers is a map of types associated with a CopierFunc. Use the global
+ // Copiers map if this is nil.
+ Copiers map[reflect.Type]CopierFunc
+}
+
+func (c Config) Copy(v interface{}) (interface{}, error) {
+ if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr {
+ return nil, errPointerRequired
+ }
+
+ w := new(walker)
+ if c.Lock {
+ w.useLocks = true
+ }
+
+ if c.Copiers == nil {
+ c.Copiers = Copiers
+ }
+
+ err := reflectwalk.Walk(v, w)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the result. If the result is nil, then we want to turn it
+ // into a typed nil if we can.
+ result := w.Result
+ if result == nil {
+ val := reflect.ValueOf(v)
+ result = reflect.Indirect(reflect.New(val.Type())).Interface()
+ }
+
+ return result, nil
+}
+
+// ifaceKey returns the key used to index interface types we've seen. Store
+// the number of pointers in the upper 32 bits, and the depth in the lower
+// 32 bits. This is easy to calculate, easy to match a key with our current
+// depth, and we don't need to deal with initializing and cleaning up nested
+// maps or slices.
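+// For example, ifaceKey(2, 3) yields 0x0000000200000003.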
+func ifaceKey(pointers, depth int) uint64 {
+ return uint64(pointers)<<32 | uint64(depth)
+}
+
+type walker struct {
+ Result interface{}
+
+ depth int
+ ignoreDepth int
+ vals []reflect.Value
+ cs []reflect.Value
+
+ // This stores the number of pointers we've walked over, indexed by depth.
+ ps []int
+
+ // If an interface is indirected by a pointer, we need to know the type of
+ // interface to create when creating the new value. Store the interface
+ // types here, indexed by both the walk depth and the number of pointers
+ // already seen at that depth. Use ifaceKey to calculate the proper uint64
+ // value.
+ ifaceTypes map[uint64]reflect.Type
+
+ // any locks we've taken, indexed by depth
+ locks []sync.Locker
+ // take locks while walking the structure
+ useLocks bool
+}
+
+func (w *walker) Enter(l reflectwalk.Location) error {
+ w.depth++
+
+ // ensure we have enough elements to index via w.depth
+ for w.depth >= len(w.locks) {
+ w.locks = append(w.locks, nil)
+ }
+
+ for len(w.ps) < w.depth+1 {
+ w.ps = append(w.ps, 0)
+ }
+
+ return nil
+}
+
+func (w *walker) Exit(l reflectwalk.Location) error {
+ locker := w.locks[w.depth]
+ w.locks[w.depth] = nil
+ if locker != nil {
+ defer locker.Unlock()
+ }
+
+ // clear out pointers and interfaces as we exit the stack
+ w.ps[w.depth] = 0
+
+ for k := range w.ifaceTypes {
+ mask := uint64(^uint32(0))
+ if k&mask == uint64(w.depth) {
+ delete(w.ifaceTypes, k)
+ }
+ }
+
+ w.depth--
+ if w.ignoreDepth > w.depth {
+ w.ignoreDepth = 0
+ }
+
+ if w.ignoring() {
+ return nil
+ }
+
+ switch l {
+ case reflectwalk.Array:
+ fallthrough
+ case reflectwalk.Map:
+ fallthrough
+ case reflectwalk.Slice:
+ w.replacePointerMaybe()
+
+ // Pop map off our container
+ w.cs = w.cs[:len(w.cs)-1]
+ case reflectwalk.MapValue:
+ // Pop off the key and value
+ mv := w.valPop()
+ mk := w.valPop()
+ m := w.cs[len(w.cs)-1]
+
+		// If mv is the zero value, SetMapIndex deletes the key from the map,
+ // or in this case never adds it. We need to create a properly typed
+ // zero value so that this key can be set.
+ if !mv.IsValid() {
+ mv = reflect.Zero(m.Elem().Type().Elem())
+ }
+ m.Elem().SetMapIndex(mk, mv)
+ case reflectwalk.ArrayElem:
+ // Pop off the value and the index and set it on the array
+ v := w.valPop()
+ i := w.valPop().Interface().(int)
+ if v.IsValid() {
+ a := w.cs[len(w.cs)-1]
+ ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call
+ if ae.CanSet() {
+ ae.Set(v)
+ }
+ }
+ case reflectwalk.SliceElem:
+ // Pop off the value and the index and set it on the slice
+ v := w.valPop()
+ i := w.valPop().Interface().(int)
+ if v.IsValid() {
+ s := w.cs[len(w.cs)-1]
+ se := s.Elem().Index(i)
+ if se.CanSet() {
+ se.Set(v)
+ }
+ }
+ case reflectwalk.Struct:
+ w.replacePointerMaybe()
+
+ // Remove the struct from the container stack
+ w.cs = w.cs[:len(w.cs)-1]
+ case reflectwalk.StructField:
+ // Pop off the value and the field
+ v := w.valPop()
+ f := w.valPop().Interface().(reflect.StructField)
+ if v.IsValid() {
+ s := w.cs[len(w.cs)-1]
+ sf := reflect.Indirect(s).FieldByName(f.Name)
+
+ if sf.CanSet() {
+ sf.Set(v)
+ }
+ }
+ case reflectwalk.WalkLoc:
+ // Clear out the slices for GC
+ w.cs = nil
+ w.vals = nil
+ }
+
+ return nil
+}
+
+func (w *walker) Map(m reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+ w.lock(m)
+
+ // Create the map. If the map itself is nil, then just make a nil map
+ var newMap reflect.Value
+ if m.IsNil() {
+ newMap = reflect.New(m.Type())
+ } else {
+ newMap = wrapPtr(reflect.MakeMap(m.Type()))
+ }
+
+ w.cs = append(w.cs, newMap)
+ w.valPush(newMap)
+ return nil
+}
+
+func (w *walker) MapElem(m, k, v reflect.Value) error {
+ return nil
+}
+
+func (w *walker) PointerEnter(v bool) error {
+ if v {
+ w.ps[w.depth]++
+ }
+ return nil
+}
+
+func (w *walker) PointerExit(v bool) error {
+ if v {
+ w.ps[w.depth]--
+ }
+ return nil
+}
+
+func (w *walker) Interface(v reflect.Value) error {
+ if !v.IsValid() {
+ return nil
+ }
+ if w.ifaceTypes == nil {
+ w.ifaceTypes = make(map[uint64]reflect.Type)
+ }
+
+ w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type()
+ return nil
+}
+
+func (w *walker) Primitive(v reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+ w.lock(v)
+
+	// IsValid verifies that v is not the zero Value and CanInterface verifies
+	// that we're allowed to read this value (unexported fields).
+ var newV reflect.Value
+ if v.IsValid() && v.CanInterface() {
+ newV = reflect.New(v.Type())
+ newV.Elem().Set(v)
+ }
+
+ w.valPush(newV)
+ w.replacePointerMaybe()
+ return nil
+}
+
+func (w *walker) Slice(s reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+ w.lock(s)
+
+ var newS reflect.Value
+ if s.IsNil() {
+ newS = reflect.New(s.Type())
+ } else {
+ newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap()))
+ }
+
+ w.cs = append(w.cs, newS)
+ w.valPush(newS)
+ return nil
+}
+
+func (w *walker) SliceElem(i int, elem reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+
+ // We don't write the slice here because elem might still be
+ // arbitrarily complex. Just record the index and continue on.
+ w.valPush(reflect.ValueOf(i))
+
+ return nil
+}
+
+func (w *walker) Array(a reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+ w.lock(a)
+
+ newA := reflect.New(a.Type())
+
+ w.cs = append(w.cs, newA)
+ w.valPush(newA)
+ return nil
+}
+
+func (w *walker) ArrayElem(i int, elem reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+
+ // We don't write the array here because elem might still be
+ // arbitrarily complex. Just record the index and continue on.
+ w.valPush(reflect.ValueOf(i))
+
+ return nil
+}
+
+func (w *walker) Struct(s reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+ w.lock(s)
+
+ var v reflect.Value
+ if c, ok := Copiers[s.Type()]; ok {
+ // We have a Copier for this struct, so we use that copier to
+ // get the copy, and we ignore anything deeper than this.
+ w.ignoreDepth = w.depth
+
+ dup, err := c(s.Interface())
+ if err != nil {
+ return err
+ }
+
+ v = reflect.ValueOf(dup)
+ } else {
+ // No copier, we copy ourselves and allow reflectwalk to guide
+ // us deeper into the structure for copying.
+ v = reflect.New(s.Type())
+ }
+
+ // Push the value onto the value stack for setting the struct field,
+ // and add the struct itself to the containers stack in case we walk
+ // deeper so that its own fields can be modified.
+ w.valPush(v)
+ w.cs = append(w.cs, v)
+
+ return nil
+}
+
+func (w *walker) StructField(f reflect.StructField, v reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+
+ // If PkgPath is non-empty, this is a private (unexported) field.
+ // We do not set this unexported since the Go runtime doesn't allow us.
+ if f.PkgPath != "" {
+ return reflectwalk.SkipEntry
+ }
+
+ // Push the field onto the stack, we'll handle it when we exit
+ // the struct field in Exit...
+ w.valPush(reflect.ValueOf(f))
+ return nil
+}
+
+// ignore causes the walker to ignore any more values until we exit this one.
+func (w *walker) ignore() {
+ w.ignoreDepth = w.depth
+}
+
+func (w *walker) ignoring() bool {
+ return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
+}
+
+func (w *walker) pointerPeek() bool {
+ return w.ps[w.depth] > 0
+}
+
+func (w *walker) valPop() reflect.Value {
+ result := w.vals[len(w.vals)-1]
+ w.vals = w.vals[:len(w.vals)-1]
+
+ // If we're out of values, that means we popped everything off. In
+ // this case, we reset the result so the next pushed value becomes
+ // the result.
+ if len(w.vals) == 0 {
+ w.Result = nil
+ }
+
+ return result
+}
+
+func (w *walker) valPush(v reflect.Value) {
+ w.vals = append(w.vals, v)
+
+ // If we haven't set the result yet, then this is the result since
+ // it is the first (outermost) value we're seeing.
+ if w.Result == nil && v.IsValid() {
+ w.Result = v.Interface()
+ }
+}
+
+func (w *walker) replacePointerMaybe() {
+ // Determine the last pointer value. If it is NOT a pointer, then
+ // we need to push that onto the stack.
+ if !w.pointerPeek() {
+ w.valPush(reflect.Indirect(w.valPop()))
+ return
+ }
+
+ v := w.valPop()
+
+ // If the expected type is a pointer to an interface of any depth,
+ // such as *interface{}, **interface{}, etc., then we need to convert
+ // the value "v" from *CONCRETE to *interface{} so types match for
+ // Set.
+ //
+	// For example, if v is of type *Foo where Foo is a struct, v would become
+	// *interface{} instead. This only happens if we have an interface
+	// expectation at this depth.
+ //
+ // For more info, see GH-16
+ if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface {
+ y := reflect.New(iType) // Create *interface{}
+ y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced)
+ v = y // v is now typed *interface{} (where *v = Foo)
+ }
+
+ for i := 1; i < w.ps[w.depth]; i++ {
+ if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
+ iface := reflect.New(iType).Elem()
+ iface.Set(v)
+ v = iface
+ }
+
+ p := reflect.New(v.Type())
+ p.Elem().Set(v)
+ v = p
+ }
+
+ w.valPush(v)
+}
+
+// if this value is a Locker, lock it and add it to the locks slice
+func (w *walker) lock(v reflect.Value) {
+ if !w.useLocks {
+ return
+ }
+
+ if !v.IsValid() || !v.CanInterface() {
+ return
+ }
+
+ type rlocker interface {
+ RLocker() sync.Locker
+ }
+
+ var locker sync.Locker
+
+ // We can't call Interface() on a value directly, since that requires
+ // a copy. This is OK, since the pointer to a value which is a sync.Locker
+ // is also a sync.Locker.
+ if v.Kind() == reflect.Ptr {
+ switch l := v.Interface().(type) {
+ case rlocker:
+ // don't lock a mutex directly
+ if _, ok := l.(*sync.RWMutex); !ok {
+ locker = l.RLocker()
+ }
+ case sync.Locker:
+ locker = l
+ }
+ } else if v.CanAddr() {
+ switch l := v.Addr().Interface().(type) {
+ case rlocker:
+ // don't lock a mutex directly
+ if _, ok := l.(*sync.RWMutex); !ok {
+ locker = l.RLocker()
+ }
+ case sync.Locker:
+ locker = l
+ }
+ }
+
+ // still no callable locker
+ if locker == nil {
+ return
+ }
+
+ // don't lock a mutex directly
+ switch locker.(type) {
+ case *sync.Mutex, *sync.RWMutex:
+ return
+ }
+
+ locker.Lock()
+ w.locks[w.depth] = locker
+}
+
+// wrapPtr is a helper that takes v and always makes it *v. copystructure
+// stores things internally as pointers until the last moment before unwrapping.
+func wrapPtr(v reflect.Value) reflect.Value {
+ if !v.IsValid() {
+ return v
+ }
+ vPtr := reflect.New(v.Type())
+ vPtr.Elem().Set(v)
+ return vPtr
+}
diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE
new file mode 100644
index 00000000..f9c841a5
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md
new file mode 100644
index 00000000..d70706d5
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/README.md
@@ -0,0 +1,14 @@
+# go-homedir
+
+This is a Go library for detecting the user's home directory without
+the use of cgo, so the library can be used in cross-compilation environments.
+
+Usage is incredibly simple: call `homedir.Dir()` to get the home directory
+for a user, and `homedir.Expand()` to expand the `~` in a path to the home
+directory.
+
+**Why not just use `os/user`?** The built-in `os/user` package requires
+cgo on Darwin systems. This means that any Go code that uses that package
+cannot cross compile. But 99% of the time the use for `os/user` is just to
+retrieve the home directory, which we can do for the current user without
+cgo. This library does that, enabling cross-compilation.
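+
+A minimal sketch of both calls (imports elided):
+
+```go
+home, err := homedir.Dir()
+if err != nil {
+	log.Fatal(err)
+}
+
+expanded, err := homedir.Expand("~/some/path")
+if err != nil {
+	log.Fatal(err)
+}
+fmt.Println(home, expanded)
+```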
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
new file mode 100644
index 00000000..47e1f9ef
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/homedir.go
@@ -0,0 +1,137 @@
+package homedir
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// DisableCache will disable caching of the home directory. Caching is enabled
+// by default.
+var DisableCache bool
+
+var homedirCache string
+var cacheLock sync.RWMutex
+
+// Dir returns the home directory for the executing user.
+//
+// This uses an OS-specific method for discovering the home directory.
+// An error is returned if a home directory cannot be detected.
+func Dir() (string, error) {
+ if !DisableCache {
+ cacheLock.RLock()
+ cached := homedirCache
+ cacheLock.RUnlock()
+ if cached != "" {
+ return cached, nil
+ }
+ }
+
+ cacheLock.Lock()
+ defer cacheLock.Unlock()
+
+ var result string
+ var err error
+ if runtime.GOOS == "windows" {
+ result, err = dirWindows()
+ } else {
+ // Unix-like system, so just assume Unix
+ result, err = dirUnix()
+ }
+
+ if err != nil {
+ return "", err
+ }
+ homedirCache = result
+ return result, nil
+}
+
+// Expand expands the path to include the home directory if the path
+// is prefixed with `~`. If it isn't prefixed with `~`, the path is
+// returned as-is.
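+//
+// For example, Expand("~/foo") returns the result of Dir() joined
+// with "foo".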
+func Expand(path string) (string, error) {
+ if len(path) == 0 {
+ return path, nil
+ }
+
+ if path[0] != '~' {
+ return path, nil
+ }
+
+ if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
+ return "", errors.New("cannot expand user-specific home dir")
+ }
+
+ dir, err := Dir()
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(dir, path[1:]), nil
+}
+
+func dirUnix() (string, error) {
+	// First prefer the HOME environment variable
+ if home := os.Getenv("HOME"); home != "" {
+ return home, nil
+ }
+
+ // If that fails, try getent
+ var stdout bytes.Buffer
+ cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ // If the error is ErrNotFound, we ignore it. Otherwise, return it.
+ if err != exec.ErrNotFound {
+ return "", err
+ }
+ } else {
+ if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
+ // username:password:uid:gid:gecos:home:shell
+ passwdParts := strings.SplitN(passwd, ":", 7)
+ if len(passwdParts) > 5 {
+ return passwdParts[5], nil
+ }
+ }
+ }
+
+ // If all else fails, try the shell
+ stdout.Reset()
+ cmd = exec.Command("sh", "-c", "cd && pwd")
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ return "", err
+ }
+
+ result := strings.TrimSpace(stdout.String())
+ if result == "" {
+ return "", errors.New("blank output when reading home directory")
+ }
+
+ return result, nil
+}
+
+func dirWindows() (string, error) {
+	// First prefer the HOME environment variable
+ if home := os.Getenv("HOME"); home != "" {
+ return home, nil
+ }
+
+ drive := os.Getenv("HOMEDRIVE")
+ path := os.Getenv("HOMEPATH")
+ home := drive + path
+ if drive == "" || path == "" {
+ home = os.Getenv("USERPROFILE")
+ }
+ if home == "" {
+ return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
+ }
+
+ return home, nil
+}
diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/mitchellh/go-testing-interface/LICENSE
new file mode 100644
index 00000000..a3866a29
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-testing-interface/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-testing-interface/README.md b/vendor/github.com/mitchellh/go-testing-interface/README.md
new file mode 100644
index 00000000..26781bba
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-testing-interface/README.md
@@ -0,0 +1,52 @@
+# go-testing-interface
+
+go-testing-interface is a Go library that exports an interface that
+`*testing.T` implements as well as a runtime version you can use in its
+place.
+
+The purpose of this library is so that you can export test helpers as a
+public API without depending on the "testing" package, since you can't
+create a `*testing.T` struct manually. This lets you, for example, use the
+public testing APIs to generate mock data at runtime, rather than just at
+test time.
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface).
+
+Given a test helper written using `go-testing-interface` like this:
+
+ import "github.com/mitchellh/go-testing-interface"
+
+ func TestHelper(t testing.T) {
+ t.Fatal("I failed")
+ }
+
+You can call the test helper in a real test easily:
+
+ import "testing"
+
+ func TestThing(t *testing.T) {
+ TestHelper(t)
+ }
+
+You can also call the test helper at runtime if needed:
+
+ import "github.com/mitchellh/go-testing-interface"
+
+ func main() {
+ TestHelper(&testing.RuntimeT{})
+ }
+
+## Why?!
+
+**Why would I call a test helper that takes a `*testing.T` at runtime?**
+
+You probably shouldn't. The only use case I've seen (and I've had) for this
+is to implement a "dev mode" for a service where the test helpers are used
+to populate mock data, create a mock DB, perhaps run service dependencies
+in-memory, etc.
+
+Outside of a "dev mode", I've never seen a use case for this and I think
+there shouldn't be one since the point of the `testing.T` interface is that
+you can fail immediately.
diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go
new file mode 100644
index 00000000..af6426a4
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go
@@ -0,0 +1,70 @@
+package testing
+
+import (
+ "fmt"
+ "log"
+)
+
+// T is the interface that mimics the standard library *testing.T.
+//
+// In unit tests you can just pass a *testing.T struct. At runtime, outside
+// of tests, you can pass in a RuntimeT struct from this package.
+type T interface {
+ Error(args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatal(args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Fail()
+ FailNow()
+ Failed() bool
+ Log(args ...interface{})
+ Logf(format string, args ...interface{})
+}
+
+// RuntimeT implements T and can be instantiated and run at runtime to
+// mimic *testing.T behavior. Unlike *testing.T, this will simply panic
+// for calls to Fatal. For calls to Error, you'll have to check Failed()
+// to determine whether to exit yourself.
+type RuntimeT struct {
+ failed bool
+}
+
+func (t *RuntimeT) Error(args ...interface{}) {
+ log.Println(fmt.Sprintln(args...))
+ t.Fail()
+}
+
+func (t *RuntimeT) Errorf(format string, args ...interface{}) {
+ log.Println(fmt.Sprintf(format, args...))
+ t.Fail()
+}
+
+func (t *RuntimeT) Fatal(args ...interface{}) {
+ log.Println(fmt.Sprintln(args...))
+ t.FailNow()
+}
+
+func (t *RuntimeT) Fatalf(format string, args ...interface{}) {
+ log.Println(fmt.Sprintf(format, args...))
+ t.FailNow()
+}
+
+func (t *RuntimeT) Fail() {
+ t.failed = true
+}
+
+func (t *RuntimeT) FailNow() {
+ panic("testing.T failed, see logs for output (if any)")
+}
+
+func (t *RuntimeT) Failed() bool {
+ return t.failed
+}
+
+func (t *RuntimeT) Log(args ...interface{}) {
+ log.Println(fmt.Sprintln(args...))
+}
+
+func (t *RuntimeT) Logf(format string, args ...interface{}) {
+ log.Println(fmt.Sprintf(format, args...))
+}
diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/LICENSE
new file mode 100644
index 00000000..a3866a29
--- /dev/null
+++ b/vendor/github.com/mitchellh/hashstructure/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md
new file mode 100644
index 00000000..619a7faf
--- /dev/null
+++ b/vendor/github.com/mitchellh/hashstructure/README.md
@@ -0,0 +1,62 @@
+# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure)
+
+hashstructure is a Go library for creating a unique hash value
+for arbitrary values in Go.
+
+This can be used to key complex values in a hash (for use in a map,
+set, etc.). The most common use cases are comparing two values without
+sending data across the network, caching values locally (de-dup), and so on.
+
+## Features
+
+ * Hash any arbitrary Go value, including complex types.
+
+ * Tag a struct field to ignore it and not affect the hash value.
+
+ * Tag a slice type struct field to treat it as a set where ordering
+ doesn't affect the hash code but the field itself is still taken into
+ account to create the hash value.
+
+ * Optionally specify a custom hash function to optimize for speed, collision
+ avoidance for your data set, etc.
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/hashstructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure).
+
+A quick code example is shown below:
+
+```go
+type ComplexStruct struct {
+ Name string
+ Age uint
+ Metadata map[string]interface{}
+}
+
+v := ComplexStruct{
+ Name: "mitchellh",
+ Age: 64,
+ Metadata: map[string]interface{}{
+ "car": true,
+ "location": "California",
+ "siblings": []string{"Bob", "John"},
+ },
+}
+
+hash, err := hashstructure.Hash(v, nil)
+if err != nil {
+ panic(err)
+}
+
+fmt.Printf("%d", hash)
+// Output:
+// 2307517237273902113
+```
diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go
new file mode 100644
index 00000000..1800f435
--- /dev/null
+++ b/vendor/github.com/mitchellh/hashstructure/hashstructure.go
@@ -0,0 +1,335 @@
+package hashstructure
+
+import (
+ "encoding/binary"
+ "fmt"
+ "hash"
+ "hash/fnv"
+ "reflect"
+)
+
+// HashOptions are options that are available for hashing.
+type HashOptions struct {
+ // Hasher is the hash function to use. If this isn't set, it will
+ // default to FNV.
+ Hasher hash.Hash64
+
+ // TagName is the struct tag to look at when hashing the structure.
+ // By default this is "hash".
+ TagName string
+
+	// ZeroNil is a flag determining whether a nil pointer should be treated
+	// as equal to the zero value of the pointed-to type. By default this is false.
+ ZeroNil bool
+}
+
+// Hash returns the hash value of an arbitrary value.
+//
+// If opts is nil, then default options will be used. See HashOptions
+// for the default values. The same *HashOptions value cannot be used
+// concurrently. None of the values within a *HashOptions struct are
+// safe to read/write while hashing is being done.
+//
+// Notes on the value:
+//
+// * Unexported fields on structs are ignored and do not affect the
+// hash value.
+//
+// * Adding an exported field to a struct with the zero value will change
+// the hash value.
+//
+// For structs, the hashing can be controlled using tags. For example:
+//
+// struct {
+// Name string
+// UUID string `hash:"ignore"`
+// }
+//
+// The available tag values are:
+//
+// * "ignore" or "-" - The field will be ignored and not affect the hash code.
+//
+// * "set" - The field will be treated as a set, where ordering doesn't
+// affect the hash code. This only works for slices.
+//
+func Hash(v interface{}, opts *HashOptions) (uint64, error) {
+ // Create default options
+ if opts == nil {
+ opts = &HashOptions{}
+ }
+ if opts.Hasher == nil {
+ opts.Hasher = fnv.New64()
+ }
+ if opts.TagName == "" {
+ opts.TagName = "hash"
+ }
+
+ // Reset the hash
+ opts.Hasher.Reset()
+
+ // Create our walker and walk the structure
+ w := &walker{
+ h: opts.Hasher,
+ tag: opts.TagName,
+ zeronil: opts.ZeroNil,
+ }
+ return w.visit(reflect.ValueOf(v), nil)
+}
+
+type walker struct {
+ h hash.Hash64
+ tag string
+ zeronil bool
+}
+
+type visitOpts struct {
+ // Flags are a bitmask of flags to affect behavior of this visit
+ Flags visitFlag
+
+ // Information about the struct containing this field
+ Struct interface{}
+ StructField string
+}
+
+func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) {
+ t := reflect.TypeOf(0)
+
+ // Loop since these can be wrapped in multiple layers of pointers
+ // and interfaces.
+ for {
+ // If we have an interface, dereference it. We have to do this up
+ // here because it might be a nil in there and the check below must
+ // catch that.
+ if v.Kind() == reflect.Interface {
+ v = v.Elem()
+ continue
+ }
+
+ if v.Kind() == reflect.Ptr {
+ if w.zeronil {
+ t = v.Type().Elem()
+ }
+ v = reflect.Indirect(v)
+ continue
+ }
+
+ break
+ }
+
+ // If it is nil, treat it like a zero.
+ if !v.IsValid() {
+ v = reflect.Zero(t)
+ }
+
+	// binary.Write requires fixed-size values, so we convert raw ints to
+	// a sized int; we'll choose the largest.
+ switch v.Kind() {
+ case reflect.Int:
+ v = reflect.ValueOf(int64(v.Int()))
+ case reflect.Uint:
+ v = reflect.ValueOf(uint64(v.Uint()))
+ case reflect.Bool:
+ var tmp int8
+ if v.Bool() {
+ tmp = 1
+ }
+ v = reflect.ValueOf(tmp)
+ }
+
+ k := v.Kind()
+
+ // We can shortcut numeric values by directly binary writing them
+ if k >= reflect.Int && k <= reflect.Complex64 {
+ // A direct hash calculation
+ w.h.Reset()
+ err := binary.Write(w.h, binary.LittleEndian, v.Interface())
+ return w.h.Sum64(), err
+ }
+
+ switch k {
+ case reflect.Array:
+ var h uint64
+ l := v.Len()
+ for i := 0; i < l; i++ {
+ current, err := w.visit(v.Index(i), nil)
+ if err != nil {
+ return 0, err
+ }
+
+ h = hashUpdateOrdered(w.h, h, current)
+ }
+
+ return h, nil
+
+ case reflect.Map:
+ var includeMap IncludableMap
+ if opts != nil && opts.Struct != nil {
+ if v, ok := opts.Struct.(IncludableMap); ok {
+ includeMap = v
+ }
+ }
+
+ // Build the hash for the map. We do this by XOR-ing all the key
+ // and value hashes. This makes it deterministic despite ordering.
+ var h uint64
+ for _, k := range v.MapKeys() {
+ v := v.MapIndex(k)
+ if includeMap != nil {
+ incl, err := includeMap.HashIncludeMap(
+ opts.StructField, k.Interface(), v.Interface())
+ if err != nil {
+ return 0, err
+ }
+ if !incl {
+ continue
+ }
+ }
+
+ kh, err := w.visit(k, nil)
+ if err != nil {
+ return 0, err
+ }
+ vh, err := w.visit(v, nil)
+ if err != nil {
+ return 0, err
+ }
+
+ fieldHash := hashUpdateOrdered(w.h, kh, vh)
+ h = hashUpdateUnordered(h, fieldHash)
+ }
+
+ return h, nil
+
+ case reflect.Struct:
+ var include Includable
+ parent := v.Interface()
+ if impl, ok := parent.(Includable); ok {
+ include = impl
+ }
+
+ t := v.Type()
+ h, err := w.visit(reflect.ValueOf(t.Name()), nil)
+ if err != nil {
+ return 0, err
+ }
+
+ l := v.NumField()
+ for i := 0; i < l; i++ {
+ if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
+ var f visitFlag
+ fieldType := t.Field(i)
+ if fieldType.PkgPath != "" {
+ // Unexported
+ continue
+ }
+
+ tag := fieldType.Tag.Get(w.tag)
+ if tag == "ignore" || tag == "-" {
+ // Ignore this field
+ continue
+ }
+
+ // Check if we implement includable and check it
+ if include != nil {
+ incl, err := include.HashInclude(fieldType.Name, v)
+ if err != nil {
+ return 0, err
+ }
+ if !incl {
+ continue
+ }
+ }
+
+ switch tag {
+ case "set":
+ f |= visitFlagSet
+ }
+
+ kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil)
+ if err != nil {
+ return 0, err
+ }
+
+ vh, err := w.visit(v, &visitOpts{
+ Flags: f,
+ Struct: parent,
+ StructField: fieldType.Name,
+ })
+ if err != nil {
+ return 0, err
+ }
+
+ fieldHash := hashUpdateOrdered(w.h, kh, vh)
+ h = hashUpdateUnordered(h, fieldHash)
+ }
+ }
+
+ return h, nil
+
+ case reflect.Slice:
+ // We have two behaviors here. If it isn't a set, then we just
+ // visit all the elements. If it is a set, then we do a deterministic
+ // hash code.
+ var h uint64
+ var set bool
+ if opts != nil {
+ set = (opts.Flags & visitFlagSet) != 0
+ }
+ l := v.Len()
+ for i := 0; i < l; i++ {
+ current, err := w.visit(v.Index(i), nil)
+ if err != nil {
+ return 0, err
+ }
+
+ if set {
+ h = hashUpdateUnordered(h, current)
+ } else {
+ h = hashUpdateOrdered(w.h, h, current)
+ }
+ }
+
+ return h, nil
+
+ case reflect.String:
+ // Directly hash
+ w.h.Reset()
+ _, err := w.h.Write([]byte(v.String()))
+ return w.h.Sum64(), err
+
+ default:
+ return 0, fmt.Errorf("unknown kind to hash: %s", k)
+ }
+}
+
+func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 {
+ // For ordered updates, use a real hash function
+ h.Reset()
+
+	// We just panic if the binary writes fail because we are writing
+	// a uint64, which should never fail.
+ e1 := binary.Write(h, binary.LittleEndian, a)
+ e2 := binary.Write(h, binary.LittleEndian, b)
+ if e1 != nil {
+ panic(e1)
+ }
+ if e2 != nil {
+ panic(e2)
+ }
+
+ return h.Sum64()
+}
+
+func hashUpdateUnordered(a, b uint64) uint64 {
+ return a ^ b
+}
+
+// visitFlag is used as a bitmask for affecting visit behavior
+type visitFlag uint
+
+const (
+ visitFlagInvalid visitFlag = iota
+ visitFlagSet = iota << 1
+)
diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go
new file mode 100644
index 00000000..b6289c0b
--- /dev/null
+++ b/vendor/github.com/mitchellh/hashstructure/include.go
@@ -0,0 +1,15 @@
+package hashstructure
+
+// Includable is an interface that can optionally be implemented by
+// a struct. It will be called for each field in the struct to check whether
+// it should be included in the hash.
+type Includable interface {
+ HashInclude(field string, v interface{}) (bool, error)
+}
+
+// IncludableMap is an interface that can optionally be implemented by
+// a struct. It will be called when a map-type field is found to ask the
+// struct if the map item should be included in the hash.
+type IncludableMap interface {
+ HashIncludeMap(field string, k, v interface{}) (bool, error)
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE
new file mode 100644
index 00000000..f9c841a5
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md
new file mode 100644
index 00000000..659d6885
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/README.md
@@ -0,0 +1,46 @@
+# mapstructure
+
+mapstructure is a Go library for decoding generic map values to structures
+and vice versa, while providing helpful error handling.
+
+This library is most useful when decoding values from some data stream (JSON,
+Gob, etc.) where you don't _quite_ know the structure of the underlying data
+until you read a part of it. You can therefore read a `map[string]interface{}`
+and use this library to decode it into the proper underlying native Go
+structure.
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/mapstructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
+
+The `Decode` function has examples associated with it there.
+
+## But Why?!
+
+Go offers fantastic standard libraries for decoding formats such as JSON.
+The standard method is to have a struct pre-created, and populate that struct
+from the bytes of the encoded format. This is great, but the problem is if
+you have configuration or an encoding that changes slightly depending on
+specific fields. For example, consider this JSON:
+
+```json
+{
+ "type": "person",
+ "name": "Mitchell"
+}
+```
+
+Perhaps we can't populate a specific structure without first reading
+the "type" field from the JSON. We could always do two passes over the
+decoding of the JSON (reading the "type" first, and the rest later).
+However, it is much simpler to just decode this into a `map[string]interface{}`
+structure, read the "type" key, then use something like this library
+to decode it into the proper structure.
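+
+A minimal sketch of that two-step approach, assuming `data` is a `[]byte`
+holding the JSON shown above:
+
+```go
+var raw map[string]interface{}
+if err := json.Unmarshal(data, &raw); err != nil {
+	panic(err)
+}
+
+if raw["type"] == "person" {
+	type Person struct {
+		Name string
+	}
+	var p Person
+	if err := mapstructure.Decode(raw, &p); err != nil {
+		panic(err)
+	}
+}
+```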
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
new file mode 100644
index 00000000..acdaadef
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
@@ -0,0 +1,158 @@
+package mapstructure
+
+import (
+ "errors"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
+// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
+func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
+ // Create variables here so we can reference them with the reflect pkg
+ var f1 DecodeHookFuncType
+ var f2 DecodeHookFuncKind
+
+ // Fill in the variables into this interface and the rest is done
+ // automatically using the reflect package.
+ potential := []interface{}{f1, f2}
+
+ v := reflect.ValueOf(h)
+ vt := v.Type()
+ for _, raw := range potential {
+ pt := reflect.ValueOf(raw).Type()
+ if vt.ConvertibleTo(pt) {
+ return v.Convert(pt).Interface()
+ }
+ }
+
+ return nil
+}
+
+// DecodeHookExec executes the given decode hook. Use this rather than
+// calling the hook directly, since it degrades gracefully to the older,
+// backwards-compatible DecodeHookFunc that took reflect.Kind instead of
+// reflect.Type.
+func DecodeHookExec(
+ raw DecodeHookFunc,
+ from reflect.Type, to reflect.Type,
+ data interface{}) (interface{}, error) {
+ switch f := typedDecodeHook(raw).(type) {
+ case DecodeHookFuncType:
+ return f(from, to, data)
+ case DecodeHookFuncKind:
+ return f(from.Kind(), to.Kind(), data)
+ default:
+ return nil, errors.New("invalid decode hook signature")
+ }
+}
+
+// ComposeDecodeHookFunc creates a single DecodeHookFunc that
+// automatically composes multiple DecodeHookFuncs.
+//
+// The composed funcs are called in order, with the result of the
+// previous transformation.
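+//
+// For example, hooks defined in this package can be composed:
+//
+//	hook := ComposeDecodeHookFunc(
+//		StringToSliceHookFunc(","),
+//		StringToTimeDurationHookFunc(),
+//	)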
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{}) (interface{}, error) {
+ var err error
+ for _, f1 := range fs {
+ data, err = DecodeHookExec(f1, f, t, data)
+ if err != nil {
+ return nil, err
+ }
+
+			// Update the from type to match the new data
+ f = nil
+ if val := reflect.ValueOf(data); val.IsValid() {
+ f = val.Type()
+ }
+ }
+
+ return data, nil
+ }
+}
+
+// StringToSliceHookFunc returns a DecodeHookFunc that converts
+// string to []string by splitting on the given sep.
+func StringToSliceHookFunc(sep string) DecodeHookFunc {
+ return func(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{}) (interface{}, error) {
+ if f != reflect.String || t != reflect.Slice {
+ return data, nil
+ }
+
+ raw := data.(string)
+ if raw == "" {
+ return []string{}, nil
+ }
+
+ return strings.Split(raw, sep), nil
+ }
+}
+
+// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
+// strings to time.Duration.
+func StringToTimeDurationHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(time.Duration(5)) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return time.ParseDuration(data.(string))
+ }
+}
+
+// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
+// the decoder.
+//
+// Note that this is significantly different from the WeaklyTypedInput option
+// of the DecoderConfig.
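+//
+// For example, with this hook installed a bool true arriving at a string
+// field decodes as "1", and false as "0".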
+func WeaklyTypedHook(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{}) (interface{}, error) {
+ dataVal := reflect.ValueOf(data)
+ switch t {
+ case reflect.String:
+ switch f {
+ case reflect.Bool:
+ if dataVal.Bool() {
+ return "1", nil
+ }
+ return "0", nil
+ case reflect.Float32:
+ return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
+ case reflect.Int:
+ return strconv.FormatInt(dataVal.Int(), 10), nil
+ case reflect.Slice:
+ dataType := dataVal.Type()
+ elemKind := dataType.Elem().Kind()
+ if elemKind == reflect.Uint8 {
+ return string(dataVal.Interface().([]uint8)), nil
+ }
+ case reflect.Uint:
+ return strconv.FormatUint(dataVal.Uint(), 10), nil
+ }
+ }
+
+ return data, nil
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
new file mode 100644
index 00000000..47a99e5a
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/error.go
@@ -0,0 +1,50 @@
+package mapstructure
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// Error implements the error interface and can represent multiple
+// errors that occur in the course of a single decode.
+type Error struct {
+ Errors []string
+}
+
+func (e *Error) Error() string {
+ points := make([]string, len(e.Errors))
+ for i, err := range e.Errors {
+ points[i] = fmt.Sprintf("* %s", err)
+ }
+
+ sort.Strings(points)
+ return fmt.Sprintf(
+ "%d error(s) decoding:\n\n%s",
+ len(e.Errors), strings.Join(points, "\n"))
+}
+
+// WrappedErrors implements the errwrap.Wrapper interface to make this
+// return value more useful with the errwrap and go-multierror libraries.
+func (e *Error) WrappedErrors() []error {
+ if e == nil {
+ return nil
+ }
+
+ result := make([]error, len(e.Errors))
+ for i, e := range e.Errors {
+ result[i] = errors.New(e)
+ }
+
+ return result
+}
+
+func appendErrors(errors []string, err error) []string {
+ switch e := err.(type) {
+ case *Error:
+ return append(errors, e.Errors...)
+ default:
+ return append(errors, e.Error())
+ }
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
new file mode 100644
index 00000000..6ec5c333
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -0,0 +1,828 @@
+// Package mapstructure exposes functionality to convert an arbitrary
+// map[string]interface{} into a native Go structure.
+//
+// The Go structure can be arbitrarily complex, containing slices,
+// other structs, etc. and the decoder will properly decode nested
+// maps and so on into the proper structures in the native Go struct.
+// See the examples to see what the decoder is capable of.
+package mapstructure
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// DecodeHookFunc is the callback function that can be used for
+// data transformations. See "DecodeHook" in the DecoderConfig
+// struct.
+//
+// The type should be DecodeHookFuncType or DecodeHookFuncKind.
+// Either is accepted. Types are a superset of Kinds (Types can return
+// Kinds) and are generally a richer thing to use, but Kinds are simpler
+// if you only need those.
+//
+// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+// we started with Kinds and then realized Types were the better solution,
+// but have a promise to not break backwards compat so we now support
+// both.
+type DecodeHookFunc interface{}
+
+// DecodeHookFuncType is a DecodeHookFunc which has complete information about
+// the source and target types.
+type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+
+// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
+// source and target types.
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
+
+// DecoderConfig is the configuration that is used to create a new decoder
+// and allows customization of various aspects of decoding.
+type DecoderConfig struct {
+ // DecodeHook, if set, will be called before any decoding and any
+ // type conversion (if WeaklyTypedInput is on). This lets you modify
+ // the values before they're set down onto the resulting struct.
+ //
+ // If an error is returned, the entire decode will fail with that
+ // error.
+ DecodeHook DecodeHookFunc
+
+ // If ErrorUnused is true, then it is an error for there to exist
+ // keys in the original map that were unused in the decoding process
+ // (extra keys).
+ ErrorUnused bool
+
+ // ZeroFields, if set to true, will zero fields before writing them.
+ // For example, a map will be emptied before decoded values are put in
+ // it. If this is false, a map will be merged.
+ ZeroFields bool
+
+ // If WeaklyTypedInput is true, the decoder will make the following
+ // "weak" conversions:
+ //
+ // - bools to string (true = "1", false = "0")
+ // - numbers to string (base 10)
+ // - bools to int/uint (true = 1, false = 0)
+ // - strings to int/uint (base implied by prefix)
+ // - int to bool (true if value != 0)
+ // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
+ // FALSE, false, False. Anything else is an error)
+ // - empty array = empty map and vice versa
+ // - negative numbers to overflowed uint values (base 10)
+ // - slice of maps to a merged map
+ // - single values are converted to slices if required. Each
+ // element is weakly decoded. For example: "4" can become []int{4}
+ // if the target type is an int slice.
+ //
+ WeaklyTypedInput bool
+
+ // Metadata is the struct that will contain extra metadata about
+ // the decoding. If this is nil, then no metadata will be tracked.
+ Metadata *Metadata
+
+ // Result is a pointer to the struct that will contain the decoded
+ // value.
+ Result interface{}
+
+ // The tag name that mapstructure reads for field names. This
+ // defaults to "mapstructure"
+ TagName string
+}
+
+// A Decoder takes a raw interface value and turns it into structured
+// data, keeping track of rich error information along the way in case
+// anything goes wrong. Unlike the basic top-level Decode method, you can
+// more finely control how the Decoder behaves using the DecoderConfig
+// structure. The top-level Decode method is just a convenience that sets
+// up the most basic Decoder.
+type Decoder struct {
+ config *DecoderConfig
+}
+
+// Metadata contains information about decoding a structure that
+// is tedious or difficult to get otherwise.
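+//
+// A minimal sketch of collecting metadata (the result type and input are
+// illustrative):
+//
+//	var md Metadata
+//	var out Target
+//	d, _ := NewDecoder(&DecoderConfig{Result: &out, Metadata: &md})
+//	_ = d.Decode(input)
+//	// md.Keys lists the decoded keys; md.Unused lists keys with no match.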
+type Metadata struct {
+ // Keys are the keys of the structure which were successfully decoded
+ Keys []string
+
+ // Unused is a slice of keys that were found in the raw value but
+ // weren't decoded since there was no matching field in the result interface
+ Unused []string
+}
+
+// Decode takes a map and uses reflection to convert it into the
+// given Go native structure. rawVal must be a pointer to a struct.
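+//
+// A minimal usage sketch (the struct and map are illustrative):
+//
+//	type Person struct {
+//		Name string
+//		Age  int
+//	}
+//
+//	var p Person
+//	err := Decode(map[string]interface{}{"name": "alice", "age": 30}, &p)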
+func Decode(m interface{}, rawVal interface{}) error {
+ config := &DecoderConfig{
+ Metadata: nil,
+ Result: rawVal,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(m)
+}
+
+// WeakDecode is the same as Decode but is shorthand to enable
+// WeaklyTypedInput. See DecoderConfig for more info.
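+//
+// For example (illustrative), with weak typing the string "8080" can be
+// decoded into an int field:
+//
+//	var cfg struct{ Port int }
+//	err := WeakDecode(map[string]interface{}{"port": "8080"}, &cfg)
+//	// on success, cfg.Port == 8080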
+func WeakDecode(input, output interface{}) error {
+ config := &DecoderConfig{
+ Metadata: nil,
+ Result: output,
+ WeaklyTypedInput: true,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(input)
+}
+
+// NewDecoder returns a new decoder for the given configuration. Once
+// a decoder has been returned, the same configuration must not be used
+// again.
+func NewDecoder(config *DecoderConfig) (*Decoder, error) {
+ val := reflect.ValueOf(config.Result)
+ if val.Kind() != reflect.Ptr {
+ return nil, errors.New("result must be a pointer")
+ }
+
+ val = val.Elem()
+ if !val.CanAddr() {
+ return nil, errors.New("result must be addressable (a pointer)")
+ }
+
+ if config.Metadata != nil {
+ if config.Metadata.Keys == nil {
+ config.Metadata.Keys = make([]string, 0)
+ }
+
+ if config.Metadata.Unused == nil {
+ config.Metadata.Unused = make([]string, 0)
+ }
+ }
+
+ if config.TagName == "" {
+ config.TagName = "mapstructure"
+ }
+
+ result := &Decoder{
+ config: config,
+ }
+
+ return result, nil
+}
+
+// Decode decodes the given raw interface to the target pointer specified
+// by the configuration.
+func (d *Decoder) Decode(raw interface{}) error {
+ return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem())
+}
+
+// Decodes an unknown data type into a specific reflection value.
+func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error {
+ if data == nil {
+ // If the data is nil, then we don't set anything.
+ return nil
+ }
+
+ dataVal := reflect.ValueOf(data)
+ if !dataVal.IsValid() {
+ // If the data value is invalid, then we just set the value
+ // to be the zero value.
+ val.Set(reflect.Zero(val.Type()))
+ return nil
+ }
+
+ if d.config.DecodeHook != nil {
+ // We have a DecodeHook, so let's pre-process the data.
+ var err error
+ data, err = DecodeHookExec(
+ d.config.DecodeHook,
+ dataVal.Type(), val.Type(), data)
+ if err != nil {
+ return fmt.Errorf("error decoding '%s': %s", name, err)
+ }
+ }
+
+ var err error
+ dataKind := getKind(val)
+ switch dataKind {
+ case reflect.Bool:
+ err = d.decodeBool(name, data, val)
+ case reflect.Interface:
+ err = d.decodeBasic(name, data, val)
+ case reflect.String:
+ err = d.decodeString(name, data, val)
+ case reflect.Int:
+ err = d.decodeInt(name, data, val)
+ case reflect.Uint:
+ err = d.decodeUint(name, data, val)
+ case reflect.Float32:
+ err = d.decodeFloat(name, data, val)
+ case reflect.Struct:
+ err = d.decodeStruct(name, data, val)
+ case reflect.Map:
+ err = d.decodeMap(name, data, val)
+ case reflect.Ptr:
+ err = d.decodePtr(name, data, val)
+ case reflect.Slice:
+ err = d.decodeSlice(name, data, val)
+ case reflect.Func:
+ err = d.decodeFunc(name, data, val)
+ default:
+ // If we reached this point then we weren't able to decode it
+ return fmt.Errorf("%s: unsupported type: %s", name, dataKind)
+ }
+
+ // If we reached here, then we successfully decoded SOMETHING, so
+ // mark the key as used if we're tracking metadata.
+ if d.config.Metadata != nil && name != "" {
+ d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+ }
+
+ return err
+}
+
+// decodeBasic decodes a basic type (bool, int, string, etc.) and assigns
+// the data, unchanged, to the target value.
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.ValueOf(data)
+ if !dataVal.IsValid() {
+ dataVal = reflect.Zero(val.Type())
+ }
+
+ dataValType := dataVal.Type()
+ if !dataValType.AssignableTo(val.Type()) {
+ return fmt.Errorf(
+ "'%s' expected type '%s', got '%s'",
+ name, val.Type(), dataValType)
+ }
+
+ val.Set(dataVal)
+ return nil
+}
+
+func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.ValueOf(data)
+ dataKind := getKind(dataVal)
+
+ converted := true
+ switch {
+ case dataKind == reflect.String:
+ val.SetString(dataVal.String())
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetString("1")
+ } else {
+ val.SetString("0")
+ }
+ case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+ val.SetString(strconv.FormatInt(dataVal.Int(), 10))
+ case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+ val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
+ case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+ val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
+ case dataKind == reflect.Slice && d.config.WeaklyTypedInput:
+ dataType := dataVal.Type()
+ elemKind := dataType.Elem().Kind()
+ switch {
+ case elemKind == reflect.Uint8:
+ val.SetString(string(dataVal.Interface().([]uint8)))
+ default:
+ converted = false
+ }
+ default:
+ converted = false
+ }
+
+ if !converted {
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s'",
+ name, val.Type(), dataVal.Type())
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.ValueOf(data)
+ dataKind := getKind(dataVal)
+ dataType := dataVal.Type()
+
+ switch {
+ case dataKind == reflect.Int:
+ val.SetInt(dataVal.Int())
+ case dataKind == reflect.Uint:
+ val.SetInt(int64(dataVal.Uint()))
+ case dataKind == reflect.Float32:
+ val.SetInt(int64(dataVal.Float()))
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetInt(1)
+ } else {
+ val.SetInt(0)
+ }
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
+ if err == nil {
+ val.SetInt(i)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
+ }
+ case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+ jn := data.(json.Number)
+ i, err := jn.Int64()
+ if err != nil {
+ return fmt.Errorf(
+ "error decoding json.Number into %s: %s", name, err)
+ }
+ val.SetInt(i)
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s'",
+ name, val.Type(), dataVal.Type())
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.ValueOf(data)
+ dataKind := getKind(dataVal)
+
+ switch {
+ case dataKind == reflect.Int:
+ i := dataVal.Int()
+ if i < 0 && !d.config.WeaklyTypedInput {
+ return fmt.Errorf("cannot parse '%s', %d overflows uint",
+ name, i)
+ }
+ val.SetUint(uint64(i))
+ case dataKind == reflect.Uint:
+ val.SetUint(dataVal.Uint())
+ case dataKind == reflect.Float32:
+ f := dataVal.Float()
+ if f < 0 && !d.config.WeaklyTypedInput {
+ return fmt.Errorf("cannot parse '%s', %f overflows uint",
+ name, f)
+ }
+ val.SetUint(uint64(f))
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetUint(1)
+ } else {
+ val.SetUint(0)
+ }
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
+ if err == nil {
+ val.SetUint(i)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
+ }
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s'",
+ name, val.Type(), dataVal.Type())
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.ValueOf(data)
+ dataKind := getKind(dataVal)
+
+ switch {
+ case dataKind == reflect.Bool:
+ val.SetBool(dataVal.Bool())
+ case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+ val.SetBool(dataVal.Int() != 0)
+ case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+ val.SetBool(dataVal.Uint() != 0)
+ case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+ val.SetBool(dataVal.Float() != 0)
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ b, err := strconv.ParseBool(dataVal.String())
+ if err == nil {
+ val.SetBool(b)
+ } else if dataVal.String() == "" {
+ val.SetBool(false)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
+ }
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s'",
+ name, val.Type(), dataVal.Type())
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.ValueOf(data)
+ dataKind := getKind(dataVal)
+ dataType := dataVal.Type()
+
+ switch {
+ case dataKind == reflect.Int:
+ val.SetFloat(float64(dataVal.Int()))
+ case dataKind == reflect.Uint:
+ val.SetFloat(float64(dataVal.Uint()))
+ case dataKind == reflect.Float32:
+ val.SetFloat(dataVal.Float())
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetFloat(1)
+ } else {
+ val.SetFloat(0)
+ }
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
+ if err == nil {
+ val.SetFloat(f)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
+ }
+ case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+ jn := data.(json.Number)
+ i, err := jn.Float64()
+ if err != nil {
+ return fmt.Errorf(
+ "error decoding json.Number into %s: %s", name, err)
+ }
+ val.SetFloat(i)
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s'",
+ name, val.Type(), dataVal.Type())
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
+ valType := val.Type()
+ valKeyType := valType.Key()
+ valElemType := valType.Elem()
+
+ // By default we overwrite keys in the current map
+ valMap := val
+
+ // If the map is nil or we're purposely zeroing fields, make a new map
+ if valMap.IsNil() || d.config.ZeroFields {
+ // Make a new map to hold our result
+ mapType := reflect.MapOf(valKeyType, valElemType)
+ valMap = reflect.MakeMap(mapType)
+ }
+
+ // Check input type
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ if dataVal.Kind() != reflect.Map {
+ // In weak mode, we accept a slice of maps as an input...
+ if d.config.WeaklyTypedInput {
+ switch dataVal.Kind() {
+ case reflect.Array, reflect.Slice:
+ // Special case for BC reasons (covered by tests)
+ if dataVal.Len() == 0 {
+ val.Set(valMap)
+ return nil
+ }
+
+ for i := 0; i < dataVal.Len(); i++ {
+ err := d.decode(
+ fmt.Sprintf("%s[%d]", name, i),
+ dataVal.Index(i).Interface(), val)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+ }
+
+ return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+ }
+
+ // Accumulate errors
+ errors := make([]string, 0)
+
+ for _, k := range dataVal.MapKeys() {
+ fieldName := fmt.Sprintf("%s[%s]", name, k)
+
+ // First decode the key into the proper type
+ currentKey := reflect.Indirect(reflect.New(valKeyType))
+ if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
+ errors = appendErrors(errors, err)
+ continue
+ }
+
+ // Next decode the data into the proper type
+ v := dataVal.MapIndex(k).Interface()
+ currentVal := reflect.Indirect(reflect.New(valElemType))
+ if err := d.decode(fieldName, v, currentVal); err != nil {
+ errors = appendErrors(errors, err)
+ continue
+ }
+
+ valMap.SetMapIndex(currentKey, currentVal)
+ }
+
+ // Set the built up map to the value
+ val.Set(valMap)
+
+ // If we had errors, return those
+ if len(errors) > 0 {
+ return &Error{errors}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
+ // Create an element of the concrete (non pointer) type and decode
+ // into that. Then set the value of the pointer to this type.
+ valType := val.Type()
+ valElemType := valType.Elem()
+
+ realVal := val
+ if realVal.IsNil() || d.config.ZeroFields {
+ realVal = reflect.New(valElemType)
+ }
+
+ if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
+ return err
+ }
+
+ val.Set(realVal)
+ return nil
+}
+
+func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
+ // Function values cannot be decoded recursively; the source and
+ // target types must match exactly, in which case the value is
+ // assigned directly.
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ if val.Type() != dataVal.Type() {
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s'",
+ name, val.Type(), dataVal.Type())
+ }
+ val.Set(dataVal)
+ return nil
+}
+
+func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataValKind := dataVal.Kind()
+ valType := val.Type()
+ valElemType := valType.Elem()
+ sliceType := reflect.SliceOf(valElemType)
+
+ valSlice := val
+ if valSlice.IsNil() || d.config.ZeroFields {
+ // Check input type
+ if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+ if d.config.WeaklyTypedInput {
+ switch {
+ // Empty maps turn into empty slices
+ case dataValKind == reflect.Map:
+ if dataVal.Len() == 0 {
+ val.Set(reflect.MakeSlice(sliceType, 0, 0))
+ return nil
+ }
+
+ // All other types we try to convert to the slice type
+ // and "lift" it into it. i.e. a string becomes a string slice.
+ default:
+ // Just re-try this function with data as a slice.
+ return d.decodeSlice(name, []interface{}{data}, val)
+ }
+ }
+
+ return fmt.Errorf(
+ "'%s': source data must be an array or slice, got %s", name, dataValKind)
+
+ }
+
+ // Make a new slice to hold our result, same size as the original data.
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
+ }
+
+ // Accumulate any errors
+ errors := make([]string, 0)
+
+ for i := 0; i < dataVal.Len(); i++ {
+ currentData := dataVal.Index(i).Interface()
+ for valSlice.Len() <= i {
+ valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
+ }
+ currentField := valSlice.Index(i)
+
+ fieldName := fmt.Sprintf("%s[%d]", name, i)
+ if err := d.decode(fieldName, currentData, currentField); err != nil {
+ errors = appendErrors(errors, err)
+ }
+ }
+
+ // Finally, set the value to the slice we built up
+ val.Set(valSlice)
+
+ // If there were errors, we return those
+ if len(errors) > 0 {
+ return &Error{errors}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+
+ // If the type of the value to write to and the data match directly,
+ // then we just set it directly instead of recursing into the structure.
+ if dataVal.Type() == val.Type() {
+ val.Set(dataVal)
+ return nil
+ }
+
+ dataValKind := dataVal.Kind()
+ if dataValKind != reflect.Map {
+ return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind)
+ }
+
+ dataValType := dataVal.Type()
+ if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
+ return fmt.Errorf(
+ "'%s' needs a map with string keys, has '%s' keys",
+ name, dataValType.Key().Kind())
+ }
+
+ dataValKeys := make(map[reflect.Value]struct{})
+ dataValKeysUnused := make(map[interface{}]struct{})
+ for _, dataValKey := range dataVal.MapKeys() {
+ dataValKeys[dataValKey] = struct{}{}
+ dataValKeysUnused[dataValKey.Interface()] = struct{}{}
+ }
+
+ errors := make([]string, 0)
+
+ // This slice will keep track of all the structs we'll be decoding.
+ // There can be more than one struct if there are embedded structs
+ // that are squashed.
+ structs := make([]reflect.Value, 1, 5)
+ structs[0] = val
+
+ // Compile the list of all the fields that we're going to be decoding
+ // from all the structs.
+ fields := make(map[*reflect.StructField]reflect.Value)
+ for len(structs) > 0 {
+ structVal := structs[0]
+ structs = structs[1:]
+
+ structType := structVal.Type()
+
+ for i := 0; i < structType.NumField(); i++ {
+ fieldType := structType.Field(i)
+ fieldKind := fieldType.Type.Kind()
+
+ // If "squash" is specified in the tag, we squash the field down.
+ squash := false
+ tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
+ for _, tag := range tagParts[1:] {
+ if tag == "squash" {
+ squash = true
+ break
+ }
+ }
+
+ if squash {
+ if fieldKind != reflect.Struct {
+ errors = appendErrors(errors,
+ fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
+ } else {
+ // Look the field up on the struct currently being scanned,
+ // which may itself be an embedded (squashed) struct.
+ structs = append(structs, structVal.FieldByName(fieldType.Name))
+ }
+ continue
+ }
+
+ // Normal struct field, store it away
+ fields[&fieldType] = structVal.Field(i)
+ }
+ }
+
+ for fieldType, field := range fields {
+ fieldName := fieldType.Name
+
+ tagValue := fieldType.Tag.Get(d.config.TagName)
+ tagValue = strings.SplitN(tagValue, ",", 2)[0]
+ if tagValue != "" {
+ fieldName = tagValue
+ }
+
+ rawMapKey := reflect.ValueOf(fieldName)
+ rawMapVal := dataVal.MapIndex(rawMapKey)
+ if !rawMapVal.IsValid() {
+ // Do a slower search by iterating over each key and
+ // doing case-insensitive search.
+ for dataValKey := range dataValKeys {
+ mK, ok := dataValKey.Interface().(string)
+ if !ok {
+ // Not a string key
+ continue
+ }
+
+ if strings.EqualFold(mK, fieldName) {
+ rawMapKey = dataValKey
+ rawMapVal = dataVal.MapIndex(dataValKey)
+ break
+ }
+ }
+
+ if !rawMapVal.IsValid() {
+ // There was no matching key in the map for the value in
+ // the struct. Just ignore.
+ continue
+ }
+ }
+
+ // Delete the key we're using from the unused map so we stop tracking
+ delete(dataValKeysUnused, rawMapKey.Interface())
+
+ if !field.IsValid() {
+ // This should never happen
+ panic("field is not valid")
+ }
+
+ // If we can't set the field, then it is unexported or something,
+ // and we just continue onwards.
+ if !field.CanSet() {
+ continue
+ }
+
+ // If the name is empty string, then we're at the root, and we
+ // don't dot-join the fields.
+ if name != "" {
+ fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+ }
+
+ if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil {
+ errors = appendErrors(errors, err)
+ }
+ }
+
+ if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
+ keys := make([]string, 0, len(dataValKeysUnused))
+ for rawKey := range dataValKeysUnused {
+ keys = append(keys, rawKey.(string))
+ }
+ sort.Strings(keys)
+
+ err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
+ errors = appendErrors(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return &Error{errors}
+ }
+
+ // If we're tracking metadata, record any unused keys
+ if d.config.Metadata != nil {
+ for rawKey := range dataValKeysUnused {
+ key := rawKey.(string)
+ if name != "" {
+ key = fmt.Sprintf("%s.%s", name, key)
+ }
+
+ d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
+ }
+ }
+
+ return nil
+}
+
+func getKind(val reflect.Value) reflect.Kind {
+ kind := val.Kind()
+
+ switch {
+ case kind >= reflect.Int && kind <= reflect.Int64:
+ return reflect.Int
+ case kind >= reflect.Uint && kind <= reflect.Uint64:
+ return reflect.Uint
+ case kind >= reflect.Float32 && kind <= reflect.Float64:
+ return reflect.Float32
+ default:
+ return kind
+ }
+}
diff --git a/vendor/github.com/mitchellh/packer/LICENSE b/vendor/github.com/mitchellh/packer/LICENSE
new file mode 100644
index 00000000..a612ad98
--- /dev/null
+++ b/vendor/github.com/mitchellh/packer/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/mitchellh/packer/README.md b/vendor/github.com/mitchellh/packer/README.md
new file mode 100644
index 00000000..b61dd6d8
--- /dev/null
+++ b/vendor/github.com/mitchellh/packer/README.md
@@ -0,0 +1,110 @@
+# Packer
+
+[![Build Status][travis-badge]][travis]
+[![Windows Build Status][appveyor-badge]][appveyor]
+[![GoDoc][godoc-badge]][godoc]
+[![GoReportCard][report-badge]][report]
+
+[travis-badge]: https://travis-ci.org/hashicorp/packer.svg?branch=master
+[travis]: https://travis-ci.org/hashicorp/packer
+[appveyor-badge]: https://ci.appveyor.com/api/projects/status/github/mitchellh/packer?branch=master&svg=true
+[appveyor]: https://ci.appveyor.com/project/hashicorp/packer
+[godoc-badge]: https://godoc.org/github.com/mitchellh/packer?status.svg
+[godoc]: https://godoc.org/github.com/mitchellh/packer
+[report-badge]: https://goreportcard.com/badge/github.com/mitchellh/packer
+[report]: https://goreportcard.com/report/github.com/mitchellh/packer
+
+* Website: https://www.packer.io
+* IRC: `#packer-tool` on Freenode
+* Mailing list: [Google Groups](http://groups.google.com/group/packer-tool)
+
+Packer is a tool for building identical machine images for multiple platforms
+from a single source configuration.
+
+Packer is lightweight, runs on every major operating system, and is highly
+performant, creating machine images for multiple platforms in parallel. Packer
+comes out of the box with support for the following platforms:
+
+* Amazon EC2 (AMI). Both EBS-backed and instance-store AMIs
+* Azure
+* CloudStack
+* DigitalOcean
+* Docker
+* Google Compute Engine
+* Hyper-V
+* 1&1
+* OpenStack
+* Parallels
+* ProfitBricks
+* QEMU. Both KVM and Xen images.
+* Triton (Joyent Public Cloud)
+* VMware
+* VirtualBox
+
+Support for other platforms can be added via plugins.
+
+The images that Packer creates can easily be turned into
+[Vagrant](http://www.vagrantup.com) boxes.
+
+## Quick Start
+
+Download and install the package and its dependencies:
+
+```
+go get github.com/hashicorp/packer
+```
+
+**Note:** There is a great
+[introduction and getting started guide](http://www.packer.io/intro)
+for those with a bit more patience. Otherwise, the quick start below
+will get you up and running quickly, at the cost of skipping some
+key points.
+
+First, [download a pre-built Packer binary](http://www.packer.io/downloads.html)
+for your operating system or [compile Packer yourself](CONTRIBUTING.md#setting-up-go-to-work-on-packer).
+
+After Packer is installed, create your first template, which tells Packer
+what platforms to build images for and how you want to build them. In our
+case, we'll create a simple AMI that has Redis pre-installed. Save this
+file as `quick-start.json`. Export your AWS credentials as the
+`AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables.
+
+```json
+{
+ "variables": {
+ "access_key": "{{env `AWS_ACCESS_KEY_ID`}}",
+ "secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}"
+ },
+ "builders": [{
+ "type": "amazon-ebs",
+ "access_key": "{{user `access_key`}}",
+ "secret_key": "{{user `secret_key`}}",
+ "region": "us-east-1",
+ "source_ami": "ami-af22d9b9",
+ "instance_type": "t2.micro",
+ "ssh_username": "ubuntu",
+ "ami_name": "packer-example {{timestamp}}"
+ }]
+}
+```
+
+Next, tell Packer to build the image:
+
+```
+$ packer build quick-start.json
+...
+```
+
+Packer will build an AMI according to the "quick-start" template. The AMI
+will be available in your AWS account. To delete the AMI, you must manually
+delete it using the [AWS console](https://console.aws.amazon.com/). Packer
+builds your images; it does not manage their lifecycle. Where they go and
+how they're run is up to you.
+
+## Documentation
+
+Comprehensive documentation is viewable on the Packer website:
+
+http://www.packer.io/docs
+
+## Developing Packer
+
+See [CONTRIBUTING.md](https://github.com/hashicorp/packer/blob/master/CONTRIBUTING.md) for best practices and instructions on setting up your development environment to work on Packer.
diff --git a/vendor/github.com/mitchellh/packer/common/uuid/uuid.go b/vendor/github.com/mitchellh/packer/common/uuid/uuid.go
new file mode 100644
index 00000000..d8b9830b
--- /dev/null
+++ b/vendor/github.com/mitchellh/packer/common/uuid/uuid.go
@@ -0,0 +1,24 @@
+package uuid
+
+import (
+ "crypto/rand"
+ "fmt"
+ "time"
+)
+
+// TimeOrderedUUID generates a time-ordered UUID: the top 32 bits are a
+// Unix timestamp and the bottom 96 bits are random.
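+//
+// The rendered form uses the familiar 8-4-4-4-12 hex grouping; because the
+// first eight hex digits encode the timestamp, IDs generated close in time
+// sort near each other lexically.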
+func TimeOrderedUUID() string {
+ unix := uint32(time.Now().UTC().Unix())
+
+ b := make([]byte, 12)
+ n, err := rand.Read(b)
+ if n != len(b) {
+ err = fmt.Errorf("Not enough entropy available")
+ }
+ if err != nil {
+ panic(err)
+ }
+ return fmt.Sprintf("%08x-%04x-%04x-%04x-%04x%08x",
+ unix, b[0:2], b[2:4], b[4:6], b[6:8], b[8:])
+}
diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE
new file mode 100644
index 00000000..f9c841a5
--- /dev/null
+++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md
new file mode 100644
index 00000000..ac82cd2e
--- /dev/null
+++ b/vendor/github.com/mitchellh/reflectwalk/README.md
@@ -0,0 +1,6 @@
+# reflectwalk
+
+reflectwalk is a Go library for "walking" a value in Go using reflection,
+in the same way a directory tree can be "walked" on the filesystem. Walking
+a complex structure can allow you to do manipulations on unknown structures
+such as those decoded from JSON.
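+
+A minimal usage sketch (the walker type and output are illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/mitchellh/reflectwalk"
+)
+
+type printWalker struct{}
+
+// Primitive is called once for every primitive value encountered.
+func (printWalker) Primitive(v reflect.Value) error {
+	fmt.Println(v.Interface())
+	return nil
+}
+
+func main() {
+	// Walks the slice and prints 1, 2 and 3.
+	reflectwalk.Walk([]int{1, 2, 3}, printWalker{})
+}
+```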
diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go
new file mode 100644
index 00000000..6a7f1761
--- /dev/null
+++ b/vendor/github.com/mitchellh/reflectwalk/location.go
@@ -0,0 +1,19 @@
+package reflectwalk
+
+//go:generate stringer -type=Location location.go
+
+type Location uint
+
+const (
+ None Location = iota
+ Map
+ MapKey
+ MapValue
+ Slice
+ SliceElem
+ Array
+ ArrayElem
+ Struct
+ StructField
+ WalkLoc
+)
diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go
new file mode 100644
index 00000000..d3cfe854
--- /dev/null
+++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=Location location.go; DO NOT EDIT
+
+package reflectwalk
+
+import "fmt"
+
+const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc"
+
+var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73}
+
+func (i Location) String() string {
+ if i+1 >= Location(len(_Location_index)) {
+ return fmt.Sprintf("Location(%d)", i)
+ }
+ return _Location_name[_Location_index[i]:_Location_index[i+1]]
+}
diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
new file mode 100644
index 00000000..33287088
--- /dev/null
+++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
@@ -0,0 +1,392 @@
+// Package reflectwalk allows you to "walk" complex structures
+// similar to how you may "walk" a filesystem: visiting every element one
+// by one and calling callback functions allowing you to handle and manipulate
+// those elements.
+package reflectwalk
+
+import (
+ "errors"
+ "reflect"
+)
+
+// PrimitiveWalker implementations are able to handle primitive values
+// within complex structures. Primitive values are numbers, strings,
+// booleans, funcs, chans.
+//
+// These primitive values are often members of more complex
+// structures (slices, maps, etc.) that are walkable by other interfaces.
+type PrimitiveWalker interface {
+ Primitive(reflect.Value) error
+}
+
+// InterfaceWalker implementations are able to handle interface values as they
+// are encountered during the walk.
+type InterfaceWalker interface {
+ Interface(reflect.Value) error
+}
+
+// MapWalker implementations are able to handle individual elements
+// found within a map structure.
+type MapWalker interface {
+ Map(m reflect.Value) error
+ MapElem(m, k, v reflect.Value) error
+}
+
+// SliceWalker implementations are able to handle slice elements found
+// within complex structures.
+type SliceWalker interface {
+ Slice(reflect.Value) error
+ SliceElem(int, reflect.Value) error
+}
+
+// ArrayWalker implementations are able to handle array elements found
+// within complex structures.
+type ArrayWalker interface {
+ Array(reflect.Value) error
+ ArrayElem(int, reflect.Value) error
+}
+
+// StructWalker is an interface that has methods that are called for
+// structs when a Walk is done.
+type StructWalker interface {
+ Struct(reflect.Value) error
+ StructField(reflect.StructField, reflect.Value) error
+}
+
+// EnterExitWalker implementations are notified before and after
+// they walk deeper into complex structures (into struct fields,
+// into slice elements, etc.)
+type EnterExitWalker interface {
+ Enter(Location) error
+ Exit(Location) error
+}
+
+// PointerWalker implementations are notified when the value they're
+// walking is a pointer or not. PointerEnter is called for _every_ value,
+// and its argument reports whether that value is a pointer.
+type PointerWalker interface {
+ PointerEnter(bool) error
+ PointerExit(bool) error
+}
+
+// SkipEntry can be returned from walk functions to skip walking
+// the value of this field. This is only valid in the following functions:
+//
+// - StructField: skips walking the struct value
+//
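+// For example (illustrative), a StructWalker can skip fields tagged with
+// `walk:"-"` by returning SkipEntry from StructField:
+//
+//	func (w *tagWalker) StructField(sf reflect.StructField, v reflect.Value) error {
+//		if sf.Tag.Get("walk") == "-" {
+//			return SkipEntry
+//		}
+//		return nil
+//	}
+//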
+var SkipEntry = errors.New("skip this entry")
+
+// Walk takes an arbitrary value and an interface and traverses the
+// value, calling callbacks on the interface if they are supported.
+// The interface should implement one or more of the walker interfaces
+// in this package, such as PrimitiveWalker, StructWalker, etc.
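+//
+// A minimal sketch (the counter type is illustrative): count every
+// primitive value in a structure.
+//
+//	type counter struct{ n int }
+//
+//	func (c *counter) Primitive(reflect.Value) error { c.n++; return nil }
+//
+//	c := &counter{}
+//	err := Walk(map[string]int{"a": 1, "b": 2}, c)
+//	// c.n == 4: two string keys and two int values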
+func Walk(data, walker interface{}) (err error) {
+ v := reflect.ValueOf(data)
+ ew, ok := walker.(EnterExitWalker)
+ if ok {
+ err = ew.Enter(WalkLoc)
+ }
+
+ if err == nil {
+ err = walk(v, walker)
+ }
+
+ if ok && err == nil {
+ err = ew.Exit(WalkLoc)
+ }
+
+ return
+}
+
+func walk(v reflect.Value, w interface{}) (err error) {
+ // Determine if we're receiving a pointer and if so notify the walker.
+ // The logic here is convoluted but very important (tests will fail if
+ // almost any part is changed). I will try to explain here.
+ //
+ // First, we check if the value is an interface, if so, we really need
+ // to check the interface's VALUE to see whether it is a pointer.
+ //
+ // Check whether the value is then a pointer. If so, then set pointer
+ // to true to notify the user.
+ //
+ // If we still have a pointer or an interface after the indirections, then
+ // we unwrap another level
+ //
+ // At this time, we also set "v" to be the dereferenced value. This is
+ // because once we've unwrapped the pointer we want to use that value.
+ pointer := false
+ pointerV := v
+
+ for {
+ if pointerV.Kind() == reflect.Interface {
+ if iw, ok := w.(InterfaceWalker); ok {
+ if err = iw.Interface(pointerV); err != nil {
+ return
+ }
+ }
+
+ pointerV = pointerV.Elem()
+ }
+
+ if pointerV.Kind() == reflect.Ptr {
+ pointer = true
+ v = reflect.Indirect(pointerV)
+ }
+ if pw, ok := w.(PointerWalker); ok {
+ if err = pw.PointerEnter(pointer); err != nil {
+ return
+ }
+
+ defer func(pointer bool) {
+ if err != nil {
+ return
+ }
+
+ err = pw.PointerExit(pointer)
+ }(pointer)
+ }
+
+ if pointer {
+ pointerV = v
+ }
+ pointer = false
+
+ // If we still have a pointer or interface we have to indirect another level.
+ switch pointerV.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ continue
+ }
+ break
+ }
+
+ // We preserve the original value here because if it is an interface
+ // type, we want to pass that directly into the walkPrimitive, so that
+ // we can set it.
+ originalV := v
+ if v.Kind() == reflect.Interface {
+ v = v.Elem()
+ }
+
+ k := v.Kind()
+ if k >= reflect.Int && k <= reflect.Complex128 {
+ k = reflect.Int
+ }
+
+ switch k {
+ // Primitives
+ case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid:
+ err = walkPrimitive(originalV, w)
+ return
+ case reflect.Map:
+ err = walkMap(v, w)
+ return
+ case reflect.Slice:
+ err = walkSlice(v, w)
+ return
+ case reflect.Struct:
+ err = walkStruct(v, w)
+ return
+ case reflect.Array:
+ err = walkArray(v, w)
+ return
+ default:
+ panic("unsupported type: " + k.String())
+ }
+}
+
+func walkMap(v reflect.Value, w interface{}) error {
+ ew, ewok := w.(EnterExitWalker)
+ if ewok {
+ ew.Enter(Map)
+ }
+
+ if mw, ok := w.(MapWalker); ok {
+ if err := mw.Map(v); err != nil {
+ return err
+ }
+ }
+
+ for _, k := range v.MapKeys() {
+ kv := v.MapIndex(k)
+
+ if mw, ok := w.(MapWalker); ok {
+ if err := mw.MapElem(v, k, kv); err != nil {
+ return err
+ }
+ }
+
+ ew, ok := w.(EnterExitWalker)
+ if ok {
+ ew.Enter(MapKey)
+ }
+
+ if err := walk(k, w); err != nil {
+ return err
+ }
+
+ if ok {
+ ew.Exit(MapKey)
+ ew.Enter(MapValue)
+ }
+
+ if err := walk(kv, w); err != nil {
+ return err
+ }
+
+ if ok {
+ ew.Exit(MapValue)
+ }
+ }
+
+ if ewok {
+ ew.Exit(Map)
+ }
+
+ return nil
+}
+
+func walkPrimitive(v reflect.Value, w interface{}) error {
+ if pw, ok := w.(PrimitiveWalker); ok {
+ return pw.Primitive(v)
+ }
+
+ return nil
+}
+
+func walkSlice(v reflect.Value, w interface{}) (err error) {
+ ew, ok := w.(EnterExitWalker)
+ if ok {
+ ew.Enter(Slice)
+ }
+
+ if sw, ok := w.(SliceWalker); ok {
+ if err := sw.Slice(v); err != nil {
+ return err
+ }
+ }
+
+ for i := 0; i < v.Len(); i++ {
+ elem := v.Index(i)
+
+ if sw, ok := w.(SliceWalker); ok {
+ if err := sw.SliceElem(i, elem); err != nil {
+ return err
+ }
+ }
+
+ ew, ok := w.(EnterExitWalker)
+ if ok {
+ ew.Enter(SliceElem)
+ }
+
+ if err := walk(elem, w); err != nil {
+ return err
+ }
+
+ if ok {
+ ew.Exit(SliceElem)
+ }
+ }
+
+ ew, ok = w.(EnterExitWalker)
+ if ok {
+ ew.Exit(Slice)
+ }
+
+ return nil
+}
+
+func walkArray(v reflect.Value, w interface{}) (err error) {
+ ew, ok := w.(EnterExitWalker)
+ if ok {
+ ew.Enter(Array)
+ }
+
+ if aw, ok := w.(ArrayWalker); ok {
+ if err := aw.Array(v); err != nil {
+ return err
+ }
+ }
+
+ for i := 0; i < v.Len(); i++ {
+ elem := v.Index(i)
+
+ if aw, ok := w.(ArrayWalker); ok {
+ if err := aw.ArrayElem(i, elem); err != nil {
+ return err
+ }
+ }
+
+ ew, ok := w.(EnterExitWalker)
+ if ok {
+ ew.Enter(ArrayElem)
+ }
+
+ if err := walk(elem, w); err != nil {
+ return err
+ }
+
+ if ok {
+ ew.Exit(ArrayElem)
+ }
+ }
+
+ ew, ok = w.(EnterExitWalker)
+ if ok {
+ ew.Exit(Array)
+ }
+
+ return nil
+}
+
+func walkStruct(v reflect.Value, w interface{}) (err error) {
+ ew, ewok := w.(EnterExitWalker)
+ if ewok {
+ ew.Enter(Struct)
+ }
+
+ if sw, ok := w.(StructWalker); ok {
+ if err = sw.Struct(v); err != nil {
+ return
+ }
+ }
+
+ vt := v.Type()
+ for i := 0; i < vt.NumField(); i++ {
+ sf := vt.Field(i)
+ f := v.FieldByIndex([]int{i})
+
+ if sw, ok := w.(StructWalker); ok {
+ err = sw.StructField(sf, f)
+
+ // SkipEntry just pretends this field doesn't even exist
+ if err == SkipEntry {
+ continue
+ }
+
+ if err != nil {
+ return
+ }
+ }
+
+ ew, ok := w.(EnterExitWalker)
+ if ok {
+ ew.Enter(StructField)
+ }
+
+ err = walk(f, w)
+ if err != nil {
+ return
+ }
+
+ if ok {
+ ew.Exit(StructField)
+ }
+ }
+
+ if ewok {
+ ew.Exit(Struct)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE
new file mode 100644
index 00000000..488357b8
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013-2016 by Maxim Bublis <b@codemonkey.ru>
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md
new file mode 100644
index 00000000..b6aad1c8
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/README.md
@@ -0,0 +1,65 @@
+# UUID package for Go language
+
+[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid)
+[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid)
+[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid)
+
+This package provides a pure Go implementation of the Universally Unique Identifier (UUID). Both creation and parsing of UUIDs are supported.
+
+It comes with 100% test coverage and benchmarks out of the box.
+
+Supported versions:
+* Version 1, based on timestamp and MAC address (RFC 4122)
+* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
+* Version 3, based on MD5 hashing (RFC 4122)
+* Version 4, based on random numbers (RFC 4122)
+* Version 5, based on SHA-1 hashing (RFC 4122)
+
+## Installation
+
+Use the `go` command:
+
+ $ go get github.com/satori/go.uuid
+
+## Requirements
+
+UUID package requires Go >= 1.2.
+
+## Example
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/satori/go.uuid"
+)
+
+func main() {
+ // Creating UUID Version 4
+ u1 := uuid.NewV4()
+ fmt.Printf("UUIDv4: %s\n", u1)
+
+ // Parsing UUID from string input
+ u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+ if err != nil {
+ fmt.Printf("Something gone wrong: %s", err)
+ }
+ fmt.Printf("Successfully parsed: %s", u2)
+}
+```
+
+## Documentation
+
+[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project.
+
+## Links
+* [RFC 4122](http://tools.ietf.org/html/rfc4122)
+* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)
+
+## Copyright
+
+Copyright (C) 2013-2016 by Maxim Bublis <b@codemonkey.ru>.
+
+The UUID package is released under the MIT License.
+See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go
new file mode 100644
index 00000000..295f3fc2
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/uuid.go
@@ -0,0 +1,481 @@
+// Copyright (C) 2013-2015 by Maxim Bublis <b@codemonkey.ru>
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+// Package uuid provides an implementation of the Universally Unique Identifier (UUID).
+// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and
+// version 2 (as specified in DCE 1.1).
+package uuid
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/rand"
+ "crypto/sha1"
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "net"
+ "os"
+ "sync"
+ "time"
+)
+
+// UUID layout variants.
+const (
+ VariantNCS = iota
+ VariantRFC4122
+ VariantMicrosoft
+ VariantFuture
+)
+
+// UUID DCE domains.
+const (
+ DomainPerson = iota
+ DomainGroup
+ DomainOrg
+)
+
+// Difference in 100-nanosecond intervals between
+// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
+const epochStart = 122192928000000000
+
+// Used in string method conversion
+const dash byte = '-'
+
+// UUID v1/v2 storage.
+var (
+ storageMutex sync.Mutex
+ storageOnce sync.Once
+ epochFunc = unixTimeFunc
+ clockSequence uint16
+ lastTime uint64
+ hardwareAddr [6]byte
+ posixUID = uint32(os.Getuid())
+ posixGID = uint32(os.Getgid())
+)
+
+// String parse helpers.
+var (
+ urnPrefix = []byte("urn:uuid:")
+ byteGroups = []int{8, 4, 4, 4, 12}
+)
+
+func initClockSequence() {
+ buf := make([]byte, 2)
+ safeRandom(buf)
+ clockSequence = binary.BigEndian.Uint16(buf)
+}
+
+func initHardwareAddr() {
+ interfaces, err := net.Interfaces()
+ if err == nil {
+ for _, iface := range interfaces {
+ if len(iface.HardwareAddr) >= 6 {
+ copy(hardwareAddr[:], iface.HardwareAddr)
+ return
+ }
+ }
+ }
+
+	// Initialize hardwareAddr randomly in case
+	// no real network interface is present.
+ safeRandom(hardwareAddr[:])
+
+ // Set multicast bit as recommended in RFC 4122
+ hardwareAddr[0] |= 0x01
+}
+
+func initStorage() {
+ initClockSequence()
+ initHardwareAddr()
+}
+
+func safeRandom(dest []byte) {
+ if _, err := rand.Read(dest); err != nil {
+ panic(err)
+ }
+}
+
+// Returns difference in 100-nanosecond intervals between
+// UUID epoch (October 15, 1582) and the current time.
+// This is the default epoch calculation function.
+func unixTimeFunc() uint64 {
+ return epochStart + uint64(time.Now().UnixNano()/100)
+}
+
+// UUID representation compliant with specification
+// described in RFC 4122.
+type UUID [16]byte
+
+// NullUUID can be used with the standard sql package to represent a
+// UUID value that can be NULL in the database
+type NullUUID struct {
+ UUID UUID
+ Valid bool
+}
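+
+// For example (illustrative; db is an assumed *sql.DB):
+//
+//	var nu NullUUID
+//	if err := db.QueryRow("SELECT id FROM users LIMIT 1").Scan(&nu); err == nil && nu.Valid {
+//		fmt.Println(nu.UUID)
+//	}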
+
+// The nil UUID is a special form of UUID that is specified to have all
+// 128 bits set to zero.
+var Nil = UUID{}
+
+// Predefined namespace UUIDs.
+var (
+ NamespaceDNS, _ = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+ NamespaceURL, _ = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
+ NamespaceOID, _ = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
+ NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+)
+
+// And returns the result of a bitwise AND of two UUIDs.
+func And(u1 UUID, u2 UUID) UUID {
+ u := UUID{}
+ for i := 0; i < 16; i++ {
+ u[i] = u1[i] & u2[i]
+ }
+ return u
+}
+
+// Or returns the result of a bitwise OR of two UUIDs.
+func Or(u1 UUID, u2 UUID) UUID {
+ u := UUID{}
+ for i := 0; i < 16; i++ {
+ u[i] = u1[i] | u2[i]
+ }
+ return u
+}
+
+// Equal returns true if u1 and u2 are equal, otherwise returns false.
+func Equal(u1 UUID, u2 UUID) bool {
+ return bytes.Equal(u1[:], u2[:])
+}
+
+// Version returns algorithm version used to generate UUID.
+func (u UUID) Version() uint {
+ return uint(u[6] >> 4)
+}
+
+// Variant returns UUID layout variant.
+func (u UUID) Variant() uint {
+ switch {
+ case (u[8] & 0x80) == 0x00:
+ return VariantNCS
+ case (u[8]&0xc0)|0x80 == 0x80:
+ return VariantRFC4122
+ case (u[8]&0xe0)|0xc0 == 0xc0:
+ return VariantMicrosoft
+ }
+ return VariantFuture
+}
+
+// Bytes returns bytes slice representation of UUID.
+func (u UUID) Bytes() []byte {
+ return u[:]
+}
+
+// String returns the canonical string representation of the UUID:
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
+func (u UUID) String() string {
+ buf := make([]byte, 36)
+
+ hex.Encode(buf[0:8], u[0:4])
+ buf[8] = dash
+ hex.Encode(buf[9:13], u[4:6])
+ buf[13] = dash
+ hex.Encode(buf[14:18], u[6:8])
+ buf[18] = dash
+ hex.Encode(buf[19:23], u[8:10])
+ buf[23] = dash
+ hex.Encode(buf[24:], u[10:])
+
+ return string(buf)
+}
+
+// SetVersion sets version bits.
+func (u *UUID) SetVersion(v byte) {
+ u[6] = (u[6] & 0x0f) | (v << 4)
+}
+
+// SetVariant sets variant bits as described in RFC 4122.
+func (u *UUID) SetVariant() {
+ u[8] = (u[8] & 0xbf) | 0x80
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The encoding is the same as returned by String.
+func (u UUID) MarshalText() (text []byte, err error) {
+ text = []byte(u.String())
+ return
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The following formats are supported:
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
+// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+func (u *UUID) UnmarshalText(text []byte) (err error) {
+ if len(text) < 32 {
+ err = fmt.Errorf("uuid: UUID string too short: %s", text)
+ return
+ }
+
+ t := text[:]
+ braced := false
+
+ if bytes.Equal(t[:9], urnPrefix) {
+ t = t[9:]
+ } else if t[0] == '{' {
+ braced = true
+ t = t[1:]
+ }
+
+ b := u[:]
+
+ for i, byteGroup := range byteGroups {
+ if i > 0 {
+ if t[0] != '-' {
+ err = fmt.Errorf("uuid: invalid string format")
+ return
+ }
+ t = t[1:]
+ }
+
+ if len(t) < byteGroup {
+ err = fmt.Errorf("uuid: UUID string too short: %s", text)
+ return
+ }
+
+ if i == 4 && len(t) > byteGroup &&
+ ((braced && t[byteGroup] != '}') || len(t[byteGroup:]) > 1 || !braced) {
+ err = fmt.Errorf("uuid: UUID string too long: %s", text)
+ return
+ }
+
+ _, err = hex.Decode(b[:byteGroup/2], t[:byteGroup])
+ if err != nil {
+ return
+ }
+
+ t = t[byteGroup:]
+ b = b[byteGroup/2:]
+ }
+
+ return
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (u UUID) MarshalBinary() (data []byte, err error) {
+ data = u.Bytes()
+ return
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+// It will return an error if the slice isn't 16 bytes long.
+func (u *UUID) UnmarshalBinary(data []byte) (err error) {
+ if len(data) != 16 {
+ err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
+ return
+ }
+ copy(u[:], data)
+
+ return
+}
+
+// Value implements the driver.Valuer interface.
+func (u UUID) Value() (driver.Value, error) {
+ return u.String(), nil
+}
+
+// Scan implements the sql.Scanner interface.
+// A 16-byte slice is handled by UnmarshalBinary, while
+// a longer byte slice or a string is handled by UnmarshalText.
+func (u *UUID) Scan(src interface{}) error {
+ switch src := src.(type) {
+ case []byte:
+ if len(src) == 16 {
+ return u.UnmarshalBinary(src)
+ }
+ return u.UnmarshalText(src)
+
+ case string:
+ return u.UnmarshalText([]byte(src))
+ }
+
+ return fmt.Errorf("uuid: cannot convert %T to UUID", src)
+}
+
+// Value implements the driver.Valuer interface.
+func (u NullUUID) Value() (driver.Value, error) {
+ if !u.Valid {
+ return nil, nil
+ }
+ // Delegate to UUID Value function
+ return u.UUID.Value()
+}
+
+// Scan implements the sql.Scanner interface.
+func (u *NullUUID) Scan(src interface{}) error {
+ if src == nil {
+ u.UUID, u.Valid = Nil, false
+ return nil
+ }
+
+ // Delegate to UUID Scan function
+ u.Valid = true
+ return u.UUID.Scan(src)
+}
+
+// FromBytes returns UUID converted from raw byte slice input.
+// It will return an error if the slice isn't 16 bytes long.
+func FromBytes(input []byte) (u UUID, err error) {
+ err = u.UnmarshalBinary(input)
+ return
+}
+
+// FromBytesOrNil returns UUID converted from raw byte slice input.
+// Same behavior as FromBytes, but returns a Nil UUID on error.
+func FromBytesOrNil(input []byte) UUID {
+ uuid, err := FromBytes(input)
+ if err != nil {
+ return Nil
+ }
+ return uuid
+}
+
+// FromString returns UUID parsed from string input.
+// Input is expected in a form accepted by UnmarshalText.
+func FromString(input string) (u UUID, err error) {
+ err = u.UnmarshalText([]byte(input))
+ return
+}
+
+// FromStringOrNil returns UUID parsed from string input.
+// Same behavior as FromString, but returns a Nil UUID on error.
+func FromStringOrNil(input string) UUID {
+ uuid, err := FromString(input)
+ if err != nil {
+ return Nil
+ }
+ return uuid
+}
+
+// Returns UUID v1/v2 storage state.
+// Returns epoch timestamp, clock sequence, and hardware address.
+func getStorage() (uint64, uint16, []byte) {
+ storageOnce.Do(initStorage)
+
+ storageMutex.Lock()
+ defer storageMutex.Unlock()
+
+ timeNow := epochFunc()
+	// If the clock was set backwards since the last UUID
+	// generation, increase the clock sequence.
+ if timeNow <= lastTime {
+ clockSequence++
+ }
+ lastTime = timeNow
+
+ return timeNow, clockSequence, hardwareAddr[:]
+}
+
+// NewV1 returns UUID based on current timestamp and MAC address.
+func NewV1() UUID {
+ u := UUID{}
+
+ timeNow, clockSeq, hardwareAddr := getStorage()
+
+ binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
+ binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+ binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+ binary.BigEndian.PutUint16(u[8:], clockSeq)
+
+ copy(u[10:], hardwareAddr)
+
+ u.SetVersion(1)
+ u.SetVariant()
+
+ return u
+}
+
+// NewV2 returns DCE Security UUID based on POSIX UID/GID.
+func NewV2(domain byte) UUID {
+ u := UUID{}
+
+ timeNow, clockSeq, hardwareAddr := getStorage()
+
+ switch domain {
+ case DomainPerson:
+ binary.BigEndian.PutUint32(u[0:], posixUID)
+ case DomainGroup:
+ binary.BigEndian.PutUint32(u[0:], posixGID)
+ }
+
+ binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+ binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+ binary.BigEndian.PutUint16(u[8:], clockSeq)
+ u[9] = domain
+
+ copy(u[10:], hardwareAddr)
+
+ u.SetVersion(2)
+ u.SetVariant()
+
+ return u
+}
+
+// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
+func NewV3(ns UUID, name string) UUID {
+ u := newFromHash(md5.New(), ns, name)
+ u.SetVersion(3)
+ u.SetVariant()
+
+ return u
+}
+
+// NewV4 returns a randomly generated UUID.
+func NewV4() UUID {
+ u := UUID{}
+ safeRandom(u[:])
+ u.SetVersion(4)
+ u.SetVariant()
+
+ return u
+}
+
+// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
+func NewV5(ns UUID, name string) UUID {
+ u := newFromHash(sha1.New(), ns, name)
+ u.SetVersion(5)
+ u.SetVariant()
+
+ return u
+}
+
+// Returns UUID based on hashing of namespace UUID and name.
+func newFromHash(h hash.Hash, ns UUID, name string) UUID {
+ u := UUID{}
+ h.Write(ns[:])
+ h.Write([]byte(name))
+ copy(u[:], h.Sum(nil))
+
+ return u
+}
diff --git a/vendor/github.com/vincent-petithory/dataurl/LICENSE b/vendor/github.com/vincent-petithory/dataurl/LICENSE
new file mode 100644
index 00000000..ae6cb62b
--- /dev/null
+++ b/vendor/github.com/vincent-petithory/dataurl/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Vincent Petithory
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/vincent-petithory/dataurl/README.md b/vendor/github.com/vincent-petithory/dataurl/README.md
new file mode 100644
index 00000000..1ac59ad2
--- /dev/null
+++ b/vendor/github.com/vincent-petithory/dataurl/README.md
@@ -0,0 +1,81 @@
+# Data URL Schemes for Go [![wercker status](https://app.wercker.com/status/6f9a2e144dfcc59e862c52459b452928/s "wercker status")](https://app.wercker.com/project/bykey/6f9a2e144dfcc59e862c52459b452928) [![GoDoc](https://godoc.org/github.com/vincent-petithory/dataurl?status.png)](https://godoc.org/github.com/vincent-petithory/dataurl)
+
+This package parses and generates Data URL Schemes for the Go language, according to [RFC 2397](http://tools.ietf.org/html/rfc2397).
+
+Data URLs are small chunks of data commonly used in browsers to display inline data,
+typically small images, or data produced by the browser's FileReader API.
+
+Common use-cases:
+
+ * generate a data URL out of a `string`, `[]byte`, or `io.Reader` for inclusion in HTML templates,
+ * parse a data URL sent by a browser in an http.Handler, and do something with the data (save to disk, etc.),
+ * ...
+
+Install the package with:
+~~~
+go get github.com/vincent-petithory/dataurl
+~~~
+
+## Usage
+
+~~~ go
+package main
+
+import (
+ "github.com/vincent-petithory/dataurl"
+ "fmt"
+)
+
+func main() {
+ dataURL, err := dataurl.DecodeString(`data:text/plain;charset=utf-8;base64,aGV5YQ==`)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+ fmt.Printf("content type: %s, data: %s\n", dataURL.MediaType.ContentType(), string(dataURL.Data))
+ // Output: content type: text/plain, data: heya
+}
+~~~
+
+From a `http.Handler`:
+
+~~~ go
+func handleDataURLUpload(w http.ResponseWriter, r *http.Request) {
+ dataURL, err := dataurl.Decode(r.Body)
+ defer r.Body.Close()
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ if dataURL.ContentType() == "image/png" {
+ ioutil.WriteFile("image.png", dataURL.Data, 0644)
+ } else {
+ http.Error(w, "not a png", http.StatusBadRequest)
+ }
+}
+~~~
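+
+Encoding works the other way around. A minimal sketch using `New` and `EncodeBytes` (base64 is the default encoding):
+
+~~~ go
+package main
+
+import (
+	"fmt"
+
+	"github.com/vincent-petithory/dataurl"
+)
+
+func main() {
+	// Explicit construction with a media type and parameters.
+	du := dataurl.New([]byte("heya"), "text/plain", "charset", "utf-8")
+	fmt.Println(du.String()) // data:text/plain;charset=utf-8;base64,aGV5YQ==
+
+	// Or let http.DetectContentType pick the media type.
+	fmt.Println(dataurl.EncodeBytes([]byte("heya")))
+}
+~~~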
+
+## Command
+
+For convenience, a `dataurl` command is provided to encode/decode dataurl streams.
+
+~~~
+dataurl - Encode or decode dataurl data and print to standard output
+
+Usage: dataurl [OPTION]... [FILE]
+
+ dataurl encodes or decodes FILE or standard input if FILE is - or omitted, and prints to standard output.
+ Unless -mimetype is used, when FILE is specified, dataurl will attempt to detect its mimetype using Go's mime.TypeByExtension (http://golang.org/pkg/mime/#TypeByExtension). If this fails or data is read from STDIN, the mimetype will default to application/octet-stream.
+
+Options:
+ -a=false: encode data using ascii instead of base64
+ -ascii=false: encode data using ascii instead of base64
+ -d=false: decode data instead of encoding
+ -decode=false: decode data instead of encoding
+ -m="": force the mimetype of the data to encode to this value
+ -mimetype="": force the mimetype of the data to encode to this value
+~~~
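+
+For example (illustrative invocations):
+
+~~~
+$ dataurl -m text/plain message.txt
+$ dataurl -d < encoded.txt
+~~~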
+
+## Contributing
+
+Feel free to file an issue/make a pull request if you find any bug, or want to suggest enhancements.
diff --git a/vendor/github.com/vincent-petithory/dataurl/dataurl.go b/vendor/github.com/vincent-petithory/dataurl/dataurl.go
new file mode 100644
index 00000000..bfd07654
--- /dev/null
+++ b/vendor/github.com/vincent-petithory/dataurl/dataurl.go
@@ -0,0 +1,280 @@
+package dataurl
+
+import (
+ "bytes"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+ "strings"
+)
+
+const (
+ // EncodingBase64 is base64 encoding for the data url
+ EncodingBase64 = "base64"
+ // EncodingASCII is ascii encoding for the data url
+ EncodingASCII = "ascii"
+)
+
+func defaultMediaType() MediaType {
+ return MediaType{
+ "text",
+ "plain",
+ map[string]string{"charset": "US-ASCII"},
+ }
+}
+
+// MediaType is the combination of a media type, a media subtype
+// and optional parameters.
+type MediaType struct {
+ Type string
+ Subtype string
+ Params map[string]string
+}
+
+// ContentType returns the content type of the dataurl's data, in the form type/subtype.
+func (mt *MediaType) ContentType() string {
+ return fmt.Sprintf("%s/%s", mt.Type, mt.Subtype)
+}
+
+// String implements the Stringer interface.
+//
+// Params values are escaped with the Escape function, rather than in a quoted string.
+func (mt *MediaType) String() string {
+ var buf bytes.Buffer
+ for k, v := range mt.Params {
+ fmt.Fprintf(&buf, ";%s=%s", k, EscapeString(v))
+ }
+ return mt.ContentType() + (&buf).String()
+}
+
+// DataURL is the combination of a MediaType, describing the type of its Data, and the Data itself.
+type DataURL struct {
+ MediaType
+ Encoding string
+ Data []byte
+}
+
+// New returns a new DataURL initialized with data and
+// a MediaType parsed from mediatype and paramPairs.
+// mediatype must be of the form "type/subtype" or it will panic.
+// paramPairs must have an even number of elements or it will panic.
+// For a more complex DataURL, initialize a DataURL struct directly.
+// The DataURL is initialized with base64 encoding.
+func New(data []byte, mediatype string, paramPairs ...string) *DataURL {
+ parts := strings.Split(mediatype, "/")
+ if len(parts) != 2 {
+ panic("dataurl: invalid mediatype")
+ }
+
+ nParams := len(paramPairs)
+ if nParams%2 != 0 {
+ panic("dataurl: requires an even number of param pairs")
+ }
+ params := make(map[string]string)
+ for i := 0; i < nParams; i += 2 {
+ params[paramPairs[i]] = paramPairs[i+1]
+ }
+
+ mt := MediaType{
+ parts[0],
+ parts[1],
+ params,
+ }
+ return &DataURL{
+ MediaType: mt,
+ Encoding: EncodingBase64,
+ Data: data,
+ }
+}
+
+// String implements the Stringer interface.
+//
+// Note: it doesn't guarantee the returned string is equal to
+// the initial source string that was used to create this DataURL.
+// The reasons for that are:
+//  * Insertion of default values for MediaType that may not have been in the initial string,
+//  * Various ways to encode the MediaType parameters (quoted string or url encoded string; the latter is used).
+func (du *DataURL) String() string {
+ var buf bytes.Buffer
+ du.WriteTo(&buf)
+ return (&buf).String()
+}
+
+// WriteTo implements the WriterTo interface.
+// See the note about String().
+func (du *DataURL) WriteTo(w io.Writer) (n int64, err error) {
+ var ni int
+ ni, _ = fmt.Fprint(w, "data:")
+ n += int64(ni)
+
+ ni, _ = fmt.Fprint(w, du.MediaType.String())
+ n += int64(ni)
+
+ if du.Encoding == EncodingBase64 {
+ ni, _ = fmt.Fprint(w, ";base64")
+ n += int64(ni)
+ }
+
+ ni, _ = fmt.Fprint(w, ",")
+ n += int64(ni)
+
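+	// Note: bytes emitted by the base64 encoder below go straight to w
+	// and are not counted in the returned n.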
+ if du.Encoding == EncodingBase64 {
+ encoder := base64.NewEncoder(base64.StdEncoding, w)
+ ni, err = encoder.Write(du.Data)
+ if err != nil {
+ return
+ }
+ encoder.Close()
+ } else if du.Encoding == EncodingASCII {
+ ni, _ = fmt.Fprint(w, Escape(du.Data))
+ n += int64(ni)
+ } else {
+ err = fmt.Errorf("dataurl: invalid encoding %s", du.Encoding)
+ return
+ }
+
+ return
+}
+
+// UnmarshalText decodes a Data URL string and sets it to *du
+func (du *DataURL) UnmarshalText(text []byte) error {
+ decoded, err := DecodeString(string(text))
+ if err != nil {
+ return err
+ }
+ *du = *decoded
+ return nil
+}
+
+// MarshalText writes du as a Data URL
+func (du *DataURL) MarshalText() ([]byte, error) {
+ buf := bytes.NewBuffer(nil)
+ if _, err := du.WriteTo(buf); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+type encodedDataReader func(string) ([]byte, error)
+
+var asciiDataReader encodedDataReader = func(s string) ([]byte, error) {
+ us, err := Unescape(s)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(us), nil
+}
+
+var base64DataReader encodedDataReader = func(s string) ([]byte, error) {
+ data, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(data), nil
+}
+
+type parser struct {
+ du *DataURL
+ l *lexer
+ currentAttr string
+ unquoteParamVal bool
+ encodedDataReaderFn encodedDataReader
+}
+
+func (p *parser) parse() error {
+ for item := range p.l.items {
+ switch item.t {
+ case itemError:
+ return errors.New(item.String())
+ case itemMediaType:
+ p.du.MediaType.Type = item.val
+ // Should we clear the default
+ // "charset" parameter at this point?
+ delete(p.du.MediaType.Params, "charset")
+ case itemMediaSubType:
+ p.du.MediaType.Subtype = item.val
+ case itemParamAttr:
+ p.currentAttr = item.val
+ case itemLeftStringQuote:
+ p.unquoteParamVal = true
+ case itemParamVal:
+ val := item.val
+ if p.unquoteParamVal {
+ p.unquoteParamVal = false
+ us, err := strconv.Unquote("\"" + val + "\"")
+ if err != nil {
+ return err
+ }
+ val = us
+ } else {
+ us, err := UnescapeToString(val)
+ if err != nil {
+ return err
+ }
+ val = us
+ }
+ p.du.MediaType.Params[p.currentAttr] = val
+ case itemBase64Enc:
+ p.du.Encoding = EncodingBase64
+ p.encodedDataReaderFn = base64DataReader
+ case itemDataComma:
+ if p.encodedDataReaderFn == nil {
+ p.encodedDataReaderFn = asciiDataReader
+ }
+ case itemData:
+ reader, err := p.encodedDataReaderFn(item.val)
+ if err != nil {
+ return err
+ }
+ p.du.Data = reader
+ case itemEOF:
+ if p.du.Data == nil {
+ p.du.Data = []byte("")
+ }
+ return nil
+ }
+ }
+ panic("EOF not found")
+}
+
+// DecodeString decodes a Data URL scheme string.
+func DecodeString(s string) (*DataURL, error) {
+ du := &DataURL{
+ MediaType: defaultMediaType(),
+ Encoding: EncodingASCII,
+ }
+
+ parser := &parser{
+ du: du,
+ l: lex(s),
+ }
+ if err := parser.parse(); err != nil {
+ return nil, err
+ }
+ return du, nil
+}
+
+// Decode decodes a Data URL scheme from a io.Reader.
+func Decode(r io.Reader) (*DataURL, error) {
+ data, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ return DecodeString(string(data))
+}
+
+// EncodeBytes encodes the data bytes into a Data URL string, using base 64 encoding.
+//
+// The media type of data is detected using http.DetectContentType.
+func EncodeBytes(data []byte) string {
+ mt := http.DetectContentType(data)
+ // http.DetectContentType may add spurious spaces between ; and a parameter.
+ // The canonical way is to not have them.
+ cleanedMt := strings.Replace(mt, "; ", ";", -1)
+
+ return New(data, cleanedMt).String()
+}
diff --git a/vendor/github.com/vincent-petithory/dataurl/doc.go b/vendor/github.com/vincent-petithory/dataurl/doc.go
new file mode 100644
index 00000000..56461d04
--- /dev/null
+++ b/vendor/github.com/vincent-petithory/dataurl/doc.go
@@ -0,0 +1,28 @@
+/*
+Package dataurl parses Data URL Schemes
+according to RFC 2397
+(http://tools.ietf.org/html/rfc2397).
+
+Data URLs are small chunks of data commonly used in browsers to display inline data,
+typically small images, or data produced by the browser's FileReader API.
+
+A dataurl looks like:
+
+ data:text/plain;charset=utf-8,A%20brief%20note
+
+Or, with base64 encoding:
+
+ data:image/vnd.microsoft.icon;name=golang%20favicon;base64,AAABAAEAEBAAAAEAIABoBAAAFgAAACgAAAAQAAAAIAAAAAEAIAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAD///8AVE44//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb/
+ /uF2/1ROOP////8A////AFROOP/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+4Xb//uF2//7hdv/+
+ ...
+ /6CcjP97c07/e3NO/1dOMf9BOiX/TkUn/2VXLf97c07/e3NO/6CcjP/h4uX/////AP///wD///8A
+ ////AP///wD///8A////AP///wDq6/H/3N/j/9fZ3f/q6/H/////AP///wD///8A////AP///wD/
+ //8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAA==
+
+Common functions are Decode and DecodeString to obtain a DataURL,
+and DataURL.String() and DataURL.WriteTo to generate a Data URL string.
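+
+A minimal decode example (illustrative):
+
+	du, err := dataurl.DecodeString("data:text/plain;charset=utf-8,A%20brief%20note")
+	if err != nil {
+		// handle the parse error
+	}
+	fmt.Println(string(du.Data)) // A brief note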
+
+*/
+package dataurl
diff --git a/vendor/github.com/vincent-petithory/dataurl/lex.go b/vendor/github.com/vincent-petithory/dataurl/lex.go
new file mode 100644
index 00000000..1a8717f5
--- /dev/null
+++ b/vendor/github.com/vincent-petithory/dataurl/lex.go
@@ -0,0 +1,521 @@
+package dataurl
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+type item struct {
+ t itemType
+ val string
+}
+
+func (i item) String() string {
+ switch i.t {
+ case itemEOF:
+ return "EOF"
+ case itemError:
+ return i.val
+ }
+ if len(i.val) > 10 {
+ return fmt.Sprintf("%.10q...", i.val)
+ }
+ return fmt.Sprintf("%q", i.val)
+}
+
+type itemType int
+
+const (
+ itemError itemType = iota
+ itemEOF
+
+ itemDataPrefix
+
+ itemMediaType
+ itemMediaSep
+ itemMediaSubType
+ itemParamSemicolon
+ itemParamAttr
+ itemParamEqual
+ itemLeftStringQuote
+ itemRightStringQuote
+ itemParamVal
+
+ itemBase64Enc
+
+ itemDataComma
+ itemData
+)
+
+const eof rune = -1
+
+func isTokenRune(r rune) bool {
+ return r <= unicode.MaxASCII &&
+ !unicode.IsControl(r) &&
+ !unicode.IsSpace(r) &&
+ !isTSpecialRune(r)
+}
+
+func isTSpecialRune(r rune) bool {
+ return r == '(' ||
+ r == ')' ||
+ r == '<' ||
+ r == '>' ||
+ r == '@' ||
+ r == ',' ||
+ r == ';' ||
+ r == ':' ||
+ r == '\\' ||
+ r == '"' ||
+ r == '/' ||
+ r == '[' ||
+ r == ']' ||
+ r == '?' ||
+ r == '='
+}
+
+// See http://tools.ietf.org/html/rfc2045
+// This doesn't include the extension-token case
+// as it's handled separately.
+func isDiscreteType(s string) bool {
+ if strings.HasPrefix(s, "text") ||
+ strings.HasPrefix(s, "image") ||
+ strings.HasPrefix(s, "audio") ||
+ strings.HasPrefix(s, "video") ||
+ strings.HasPrefix(s, "application") {
+ return true
+ }
+ return false
+}
+
+// See http://tools.ietf.org/html/rfc2045
+// This doesn't include the extension-token case
+// as it's handled separately.
+func isCompositeType(s string) bool {
+ if strings.HasPrefix(s, "message") ||
+ strings.HasPrefix(s, "multipart") {
+ return true
+ }
+ return false
+}
+
+func isURLCharRune(r rune) bool {
+ // We're a bit permissive here,
+	// by not including '%' in delims.
+	// This is okay, since URL unescaping will validate
+ // that later in the parser.
+ return r <= unicode.MaxASCII &&
+ !(r >= 0x00 && r <= 0x1F) && r != 0x7F && /* control */
+ // delims
+ r != ' ' &&
+ r != '<' &&
+ r != '>' &&
+ r != '#' &&
+ r != '"' &&
+ // unwise
+ r != '{' &&
+ r != '}' &&
+ r != '|' &&
+ r != '\\' &&
+ r != '^' &&
+ r != '[' &&
+ r != ']' &&
+ r != '`'
+}
+
+func isBase64Rune(r rune) bool {
+ return (r >= 'a' && r <= 'z') ||
+ (r >= 'A' && r <= 'Z') ||
+ (r >= '0' && r <= '9') ||
+ r == '+' ||
+ r == '/' ||
+ r == '=' ||
+ r == '\n'
+}
+
+type stateFn func(*lexer) stateFn
+
+// lexer lexes the data URL scheme input string.
+// The implementation is from the text/template/parser package.
+type lexer struct {
+ input string
+ start int
+ pos int
+ width int
+ seenBase64Item bool
+ items chan item
+}
+
+func (l *lexer) run() {
+ for state := lexBeforeDataPrefix; state != nil; {
+ state = state(l)
+ }
+ close(l.items)
+}
+
+func (l *lexer) emit(t itemType) {
+ l.items <- item{t, l.input[l.start:l.pos]}
+ l.start = l.pos
+}
+
+func (l *lexer) next() (r rune) {
+ if l.pos >= len(l.input) {
+ l.width = 0
+ return eof
+ }
+ r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
+ l.pos += l.width
+ return r
+}
+
+func (l *lexer) backup() {
+ l.pos -= l.width
+}
+
+func (l *lexer) ignore() {
+ l.start = l.pos
+}
+
+func (l *lexer) errorf(format string, args ...interface{}) stateFn {
+ l.items <- item{itemError, fmt.Sprintf(format, args...)}
+ return nil
+}
+
+func lex(input string) *lexer {
+ l := &lexer{
+ input: input,
+ items: make(chan item),
+ }
+ go l.run() // Concurrently run state machine.
+ return l
+}
+
+const (
+ dataPrefix = "data:"
+ mediaSep = '/'
+ paramSemicolon = ';'
+ paramEqual = '='
+ dataComma = ','
+)
+
+// start lexing by detecting data prefix
+func lexBeforeDataPrefix(l *lexer) stateFn {
+ if strings.HasPrefix(l.input[l.pos:], dataPrefix) {
+ return lexDataPrefix
+ }
+ return l.errorf("missing data prefix")
+}
+
+// lex data prefix
+func lexDataPrefix(l *lexer) stateFn {
+ l.pos += len(dataPrefix)
+ l.emit(itemDataPrefix)
+ return lexAfterDataPrefix
+}
+
+// lex what's after data prefix.
+// it can be the media type/subtype separator,
+// the base64 encoding, or the comma preceding the data
+func lexAfterDataPrefix(l *lexer) stateFn {
+ switch r := l.next(); {
+ case r == paramSemicolon:
+ l.backup()
+ return lexParamSemicolon
+ case r == dataComma:
+ l.backup()
+ return lexDataComma
+ case r == eof:
+ return l.errorf("missing comma before data")
+ case r == 'x' || r == 'X':
+ if l.next() == '-' {
+ return lexXTokenMediaType
+ }
+ return lexInDiscreteMediaType
+ case isTokenRune(r):
+ return lexInDiscreteMediaType
+ default:
+ return l.errorf("invalid character after data prefix")
+ }
+}
+
+func lexXTokenMediaType(l *lexer) stateFn {
+ for {
+ switch r := l.next(); {
+ case r == mediaSep:
+ l.backup()
+ return lexMediaType
+ case r == eof:
+ return l.errorf("missing media type slash")
+ case isTokenRune(r):
+ default:
+ return l.errorf("invalid character for media type")
+ }
+ }
+}
+
+func lexInDiscreteMediaType(l *lexer) stateFn {
+ for {
+ switch r := l.next(); {
+ case r == mediaSep:
+ l.backup()
+ // check it's valid discrete type
+ if !isDiscreteType(l.input[l.start:l.pos]) &&
+ !isCompositeType(l.input[l.start:l.pos]) {
+ return l.errorf("invalid media type")
+ }
+ return lexMediaType
+ case r == eof:
+ return l.errorf("missing media type slash")
+ case isTokenRune(r):
+ default:
+ return l.errorf("invalid character for media type")
+ }
+ }
+}
+
+func lexMediaType(l *lexer) stateFn {
+ if l.pos > l.start {
+ l.emit(itemMediaType)
+ }
+ return lexMediaSep
+}
+
+func lexMediaSep(l *lexer) stateFn {
+ l.next()
+ l.emit(itemMediaSep)
+ return lexAfterMediaSep
+}
+
+func lexAfterMediaSep(l *lexer) stateFn {
+ for {
+ switch r := l.next(); {
+ case r == paramSemicolon || r == dataComma:
+ l.backup()
+ return lexMediaSubType
+ case r == eof:
+ return l.errorf("incomplete media type")
+ case isTokenRune(r):
+ default:
+ return l.errorf("invalid character for media subtype")
+ }
+ }
+}
+
+func lexMediaSubType(l *lexer) stateFn {
+ if l.pos > l.start {
+ l.emit(itemMediaSubType)
+ }
+ return lexAfterMediaSubType
+}
+
+func lexAfterMediaSubType(l *lexer) stateFn {
+ switch r := l.next(); {
+ case r == paramSemicolon:
+ l.backup()
+ return lexParamSemicolon
+ case r == dataComma:
+ l.backup()
+ return lexDataComma
+ case r == eof:
+ return l.errorf("missing comma before data")
+ default:
+ return l.errorf("expected semicolon or comma")
+ }
+}
+
+func lexParamSemicolon(l *lexer) stateFn {
+ l.next()
+ l.emit(itemParamSemicolon)
+ return lexAfterParamSemicolon
+}
+
+func lexAfterParamSemicolon(l *lexer) stateFn {
+ switch r := l.next(); {
+ case r == eof:
+ return l.errorf("unterminated parameter sequence")
+ case r == paramEqual || r == dataComma:
+ return l.errorf("unterminated parameter sequence")
+ case isTokenRune(r):
+ l.backup()
+ return lexInParamAttr
+ default:
+ return l.errorf("invalid character for parameter attribute")
+ }
+}
+
+func lexBase64Enc(l *lexer) stateFn {
+ if l.pos > l.start {
+ if v := l.input[l.start:l.pos]; v != "base64" {
+ return l.errorf("expected base64, got %s", v)
+ }
+ l.seenBase64Item = true
+ l.emit(itemBase64Enc)
+ }
+ return lexDataComma
+}
+
+func lexInParamAttr(l *lexer) stateFn {
+ for {
+ switch r := l.next(); {
+ case r == paramEqual:
+ l.backup()
+ return lexParamAttr
+ case r == dataComma:
+ l.backup()
+ return lexBase64Enc
+ case r == eof:
+ return l.errorf("unterminated parameter sequence")
+ case isTokenRune(r):
+ default:
+ return l.errorf("invalid character for parameter attribute")
+ }
+ }
+}
+
+func lexParamAttr(l *lexer) stateFn {
+ if l.pos > l.start {
+ l.emit(itemParamAttr)
+ }
+ return lexParamEqual
+}
+
+func lexParamEqual(l *lexer) stateFn {
+ l.next()
+ l.emit(itemParamEqual)
+ return lexAfterParamEqual
+}
+
+func lexAfterParamEqual(l *lexer) stateFn {
+ switch r := l.next(); {
+ case r == '"':
+ l.emit(itemLeftStringQuote)
+ return lexInQuotedStringParamVal
+ case r == eof:
+ return l.errorf("missing comma before data")
+ case isTokenRune(r):
+ return lexInParamVal
+ default:
+ return l.errorf("invalid character for parameter value")
+ }
+}
+
+func lexInQuotedStringParamVal(l *lexer) stateFn {
+ for {
+ switch r := l.next(); {
+ case r == eof:
+ return l.errorf("unclosed quoted string")
+ case r == '\\':
+ return lexEscapedChar
+ case r == '"':
+ l.backup()
+ return lexQuotedStringParamVal
+ case r <= unicode.MaxASCII:
+ default:
+ return l.errorf("invalid character for parameter value")
+ }
+ }
+}
+
+func lexEscapedChar(l *lexer) stateFn {
+ switch r := l.next(); {
+ case r <= unicode.MaxASCII:
+ return lexInQuotedStringParamVal
+ case r == eof:
+ return l.errorf("unexpected eof")
+ default:
+ return l.errorf("invalid escaped character")
+ }
+}
+
+func lexInParamVal(l *lexer) stateFn {
+ for {
+ switch r := l.next(); {
+ case r == paramSemicolon || r == dataComma:
+ l.backup()
+ return lexParamVal
+ case r == eof:
+ return l.errorf("missing comma before data")
+ case isTokenRune(r):
+ default:
+ return l.errorf("invalid character for parameter value")
+ }
+ }
+}
+
+func lexQuotedStringParamVal(l *lexer) stateFn {
+ if l.pos > l.start {
+ l.emit(itemParamVal)
+ }
+ l.next()
+ l.emit(itemRightStringQuote)
+ return lexAfterParamVal
+}
+
+func lexParamVal(l *lexer) stateFn {
+ if l.pos > l.start {
+ l.emit(itemParamVal)
+ }
+ return lexAfterParamVal
+}
+
+func lexAfterParamVal(l *lexer) stateFn {
+ switch r := l.next(); {
+ case r == paramSemicolon:
+ l.backup()
+ return lexParamSemicolon
+ case r == dataComma:
+ l.backup()
+ return lexDataComma
+ case r == eof:
+ return l.errorf("missing comma before data")
+ default:
+ return l.errorf("expected semicolon or comma")
+ }
+}
+
+func lexDataComma(l *lexer) stateFn {
+ l.next()
+ l.emit(itemDataComma)
+ if l.seenBase64Item {
+ return lexBase64Data
+ }
+ return lexData
+}
+
+func lexData(l *lexer) stateFn {
+Loop:
+ for {
+ switch r := l.next(); {
+ case r == eof:
+ break Loop
+ case isURLCharRune(r):
+ default:
+ return l.errorf("invalid data character")
+ }
+ }
+ if l.pos > l.start {
+ l.emit(itemData)
+ }
+ l.emit(itemEOF)
+ return nil
+}
+
+func lexBase64Data(l *lexer) stateFn {
+Loop:
+ for {
+ switch r := l.next(); {
+ case r == eof:
+ break Loop
+ case isBase64Rune(r):
+ default:
+ return l.errorf("invalid data character")
+ }
+ }
+ if l.pos > l.start {
+ l.emit(itemData)
+ }
+ l.emit(itemEOF)
+ return nil
+}
diff --git a/vendor/github.com/vincent-petithory/dataurl/rfc2396.go b/vendor/github.com/vincent-petithory/dataurl/rfc2396.go
new file mode 100644
index 00000000..e2ea0cac
--- /dev/null
+++ b/vendor/github.com/vincent-petithory/dataurl/rfc2396.go
@@ -0,0 +1,130 @@
+package dataurl
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// Escape implements URL escaping, as defined in RFC 2397 (http://tools.ietf.org/html/rfc2397).
+// It differs a bit from net/url's QueryEscape and QueryUnescape, e.g. in how spaces are treated (QueryEscape uses +, Escape uses %20):
+//
+// Only ASCII chars are allowed. Reserved chars are escaped to their %xx form.
+// Unreserved chars are [a-z], [A-Z], [0-9], and -_.!~*\().
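+//
+// For example (illustrative): Escape([]byte("A brief note")) returns
+// "A%20brief%20note".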
+func Escape(data []byte) string {
+ var buf = new(bytes.Buffer)
+ for _, b := range data {
+ switch {
+ case isUnreserved(b):
+ buf.WriteByte(b)
+ default:
+ fmt.Fprintf(buf, "%%%02X", b)
+ }
+ }
+ return buf.String()
+}
+
+// EscapeString is like Escape, but taking
+// a string as argument.
+func EscapeString(s string) string {
+ return Escape([]byte(s))
+}
+
+// isUnreserved returns true
+// if the byte c is an unreserved char,
+// as defined in RFC 2396.
+func isUnreserved(c byte) bool {
+ return (c >= 'a' && c <= 'z') ||
+ (c >= 'A' && c <= 'Z') ||
+ (c >= '0' && c <= '9') ||
+ c == '-' ||
+ c == '_' ||
+ c == '.' ||
+ c == '!' ||
+ c == '~' ||
+ c == '*' ||
+ c == '\'' ||
+ c == '(' ||
+ c == ')'
+}
+
+func isHex(c byte) bool {
+ switch {
+ case c >= 'a' && c <= 'f':
+ return true
+ case c >= 'A' && c <= 'F':
+ return true
+ case c >= '0' && c <= '9':
+ return true
+ }
+ return false
+}
+
+// borrowed from net/url/url.go
+func unhex(c byte) byte {
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0'
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10
+ }
+ return 0
+}
+
+// Unescape unescapes a character sequence
+// escaped with Escape or EscapeString.
+func Unescape(s string) ([]byte, error) {
+ var buf = new(bytes.Buffer)
+ reader := strings.NewReader(s)
+
+ for {
+ r, size, err := reader.ReadRune()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ if size > 1 {
+ return nil, fmt.Errorf("rfc2396: non-ASCII char detected")
+ }
+
+ switch r {
+ case '%':
+ eb1, err := reader.ReadByte()
+ if err == io.EOF {
+ return nil, fmt.Errorf("rfc2396: unexpected end of unescape sequence")
+ }
+ if err != nil {
+ return nil, err
+ }
+ if !isHex(eb1) {
+				return nil, fmt.Errorf("rfc2396: invalid char 0x%x in unescape sequence", eb1)
+ }
+ eb0, err := reader.ReadByte()
+ if err == io.EOF {
+ return nil, fmt.Errorf("rfc2396: unexpected end of unescape sequence")
+ }
+ if err != nil {
+ return nil, err
+ }
+ if !isHex(eb0) {
+				return nil, fmt.Errorf("rfc2396: invalid char 0x%x in unescape sequence", eb0)
+ }
+ buf.WriteByte(unhex(eb0) + unhex(eb1)*16)
+ default:
+ buf.WriteByte(byte(r))
+ }
+ }
+ return buf.Bytes(), nil
+}
+
+// UnescapeToString is like Unescape, but returning
+// a string.
+func UnescapeToString(s string) (string, error) {
+ b, err := Unescape(s)
+ return string(b), err
+}
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE
new file mode 100644
index 00000000..866d74a7
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
new file mode 100644
index 00000000..8da58fbf
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+ apic.go
+ emitterc.go
+ parserc.go
+ readerc.go
+ scannerc.go
+ writerc.go
+ yamlh.go
+ yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md
new file mode 100644
index 00000000..1884de6a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/README.md
@@ -0,0 +1,131 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v2*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v2
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v2"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go
new file mode 100644
index 00000000..95ec014e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/apic.go
@@ -0,0 +1,742 @@
+package yaml
+
+import (
+ "io"
+ "os"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// File read handler.
+func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_file.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_file_read_handler
+ parser.input_file = file
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+ return true
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// File write handler.
+func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_file.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_file_write_handler
+ emitter.output_file = file
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+ return true
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+ return true
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t, implicit bool) bool {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+ return true
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+// mark yaml_mark_t = { 0, 0, 0 }
+// anchor_copy *yaml_char_t = NULL
+//
+// assert(event) // Non-NULL event object is expected.
+// assert(anchor) // Non-NULL anchor is expected.
+//
+// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+// anchor_copy = yaml_strdup(anchor)
+// if (!anchor_copy)
+// return 0
+//
+// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+// return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+ return true
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
new file mode 100644
index 00000000..052ecfcd
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/decode.go
@@ -0,0 +1,682 @@
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const (
+ documentNode = 1 << iota
+ mappingNode
+ sequenceNode
+ scalarNode
+ aliasNode
+)
+
+type node struct {
+ kind int
+ line, column int
+ tag string
+ value string
+ implicit bool
+ children []*node
+ anchors map[string]*node
+}
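+
+// For illustration: parsing "a: [1, 2]" yields a documentNode whose single
+// child is a mappingNode; the mapping's children interleave keys and values,
+// here the scalar "a" followed by a sequenceNode holding two scalar children.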
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *node
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML emitter")
+ }
+
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+
+ yaml_parser_set_input_string(&p.parser, b)
+
+ p.skip()
+ if p.event.typ != yaml_STREAM_START_EVENT {
+ panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+ }
+ p.skip()
+ return &p
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
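+// skip discards the current event, if any, and parses the next one into
+// p.event. Advancing past STREAM-END indicates a corrupted stream and fails.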
+func (p *parser) skip() {
+ if p.event.typ != yaml_NO_EVENT {
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ yaml_event_delete(&p.event)
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+ if anchor != nil {
+ p.doc.anchors[string(anchor)] = n
+ }
+}
+
+func (p *parser) parse() *node {
+ switch p.event.typ {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ default:
+ panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
+ }
+}
+
+func (p *parser) node(kind int) *node {
+ return &node{
+ kind: kind,
+ line: p.event.start_mark.line,
+ column: p.event.start_mark.column,
+ }
+}
+
+func (p *parser) document() *node {
+ n := p.node(documentNode)
+ n.anchors = make(map[string]*node)
+ p.doc = n
+ p.skip()
+ n.children = append(n.children, p.parse())
+ if p.event.typ != yaml_DOCUMENT_END_EVENT {
+ panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
+ }
+ p.skip()
+ return n
+}
+
+func (p *parser) alias() *node {
+ n := p.node(aliasNode)
+ n.value = string(p.event.anchor)
+ p.skip()
+ return n
+}
+
+func (p *parser) scalar() *node {
+ n := p.node(scalarNode)
+ n.value = string(p.event.value)
+ n.tag = string(p.event.tag)
+ n.implicit = p.event.implicit
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ return n
+}
+
+func (p *parser) sequence() *node {
+ n := p.node(sequenceNode)
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ for p.event.typ != yaml_SEQUENCE_END_EVENT {
+ n.children = append(n.children, p.parse())
+ }
+ p.skip()
+ return n
+}
+
+func (p *parser) mapping() *node {
+ n := p.node(mappingNode)
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ for p.event.typ != yaml_MAPPING_END_EVENT {
+ n.children = append(n.children, p.parse(), p.parse())
+ }
+ p.skip()
+ return n
+}
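+
+// The decoder below relies on this layout: a mappingNode's children hold
+// key, value, key, value, ... flat, so it iterates over them in steps of two.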
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *node
+ aliases map[string]bool
+ mapType reflect.Type
+ terrors []string
+}
+
+var (
+ mapItemType = reflect.TypeOf(MapItem{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = defaultMapType.Elem()
+)
+
+func newDecoder() *decoder {
+ d := &decoder{mapType: defaultMapType}
+ d.aliases = make(map[string]bool)
+ return d
+}
+
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+ if n.tag != "" {
+ tag = n.tag
+ }
+ value := n.value
+ if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// the value unmarshalled successfully.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+ switch n.kind {
+ case documentNode:
+ return d.document(n, out)
+ case aliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.kind {
+ case scalarNode:
+ good = d.scalar(n, out)
+ case mappingNode:
+ good = d.mapping(n, out)
+ case sequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+ }
+ return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+ if len(n.children) == 1 {
+ d.doc = n
+ d.unmarshal(n.children[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+ an, ok := d.doc.anchors[n.value]
+ if !ok {
+ failf("unknown anchor '%s' referenced", n.value)
+ }
+ if d.aliases[n.value] {
+ failf("anchor '%s' value contains itself", n.value)
+ }
+ d.aliases[n.value] = true
+ good = d.unmarshal(an, out)
+ delete(d.aliases, n.value)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+ var tag string
+ var resolved interface{}
+ if n.tag == "" && !n.implicit {
+ tag = yaml_STR_TAG
+ resolved = n.value
+ } else {
+ tag, resolved = resolve(n.tag, n.value)
+ if tag == yaml_BINARY_TAG {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ if out.Kind() == reflect.Map && !out.CanAddr() {
+ resetMap(out)
+ } else {
+ out.Set(reflect.Zero(out.Type()))
+ }
+ return true
+ }
+ if s, ok := resolved.(string); ok && out.CanAddr() {
+ if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
+ err := u.UnmarshalText([]byte(s))
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == yaml_BINARY_TAG {
+ out.SetString(resolved.(string))
+ good = true
+ } else if resolved != nil {
+ out.SetString(n.value)
+ good = true
+ }
+ case reflect.Interface:
+ if resolved == nil {
+ out.Set(reflect.Zero(out.Type()))
+ } else {
+ out.Set(reflect.ValueOf(resolved))
+ }
+ good = true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch resolved := resolved.(type) {
+ case int:
+ if !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case int64:
+ if !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ good = true
+ }
+ case uint64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case float64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ good = true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ good = true
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ good = true
+ case int64:
+ out.SetFloat(float64(resolved))
+ good = true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ good = true
+ case float64:
+ out.SetFloat(resolved)
+ good = true
+ }
+ case reflect.Ptr:
+ if out.Type().Elem() == reflect.TypeOf(resolved) {
+ // TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+ elem := reflect.New(out.Type().Elem())
+ elem.Elem().Set(reflect.ValueOf(resolved))
+ out.Set(elem)
+ good = true
+ }
+ }
+ if !good {
+ d.terror(n, tag, out)
+ }
+ return good
+}
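+
+// Usage note: the durationType case above lets a string scalar decode
+// straight into a time.Duration field, e.g. YAML "timeout: 1m30s" filling
+// struct{ Timeout time.Duration }.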
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+ l := len(n.children)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, yaml_SEQ_TAG, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.children[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ out.Set(out.Slice(0, j))
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
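+
+// Elements that fail to decode are dropped rather than aborting the whole
+// sequence: unmarshal records a type error in d.terrors and the slice is
+// trimmed to the j elements that did decode.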
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Slice:
+ return d.mappingSlice(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ if d.mapType.Kind() == reflect.Map {
+ iface := out
+ out = reflect.MakeMap(d.mapType)
+ iface.Set(out)
+ } else {
+ slicev := reflect.New(d.mapType).Elem()
+ if !d.mappingSlice(n, slicev) {
+ return false
+ }
+ out.Set(slicev)
+ return true
+ }
+ default:
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ mapType := d.mapType
+ if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+ d.mapType = outt
+ }
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.children[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.children[i+1], e) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+ outt := out.Type()
+ if outt.Elem() != mapItemType {
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+
+ mapType := d.mapType
+ d.mapType = outt
+
+ var slice []MapItem
+ var l = len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ item := MapItem{}
+ k := reflect.ValueOf(&item.Key).Elem()
+ if d.unmarshal(n.children[i], k) {
+ v := reflect.ValueOf(&item.Value).Elem()
+ if d.unmarshal(n.children[i+1], v) {
+ slice = append(slice, item)
+ }
+ }
+ }
+ out.Set(reflect.ValueOf(slice))
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+ name := settableValueOf("")
+ l := len(n.children)
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+ elemType = inlineMap.Type().Elem()
+ }
+
+ for i := 0; i < l; i += 2 {
+ ni := n.children[i]
+ if isMerge(ni) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = out.FieldByIndex(info.Inline)
+ }
+ d.unmarshal(n.children[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.children[i+1], value)
+ inlineMap.SetMapIndex(name, value)
+ }
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *node, out reflect.Value) {
+ switch n.kind {
+ case mappingNode:
+ d.unmarshal(n, out)
+ case aliasNode:
+ an, ok := d.doc.anchors[n.value]
+ if ok && an.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case sequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.children) - 1; i >= 0; i-- {
+ ni := n.children[i]
+ if ni.kind == aliasNode {
+ an, ok := d.doc.anchors[ni.value]
+ if ok && an.kind != mappingNode {
+ failWantMap()
+ }
+ } else if ni.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
+
+func isMerge(n *node) bool {
+ return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
+}
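+
+// For illustration, merge keys look like:
+//
+//   defaults: &defaults
+//     adapter: postgres
+//   development:
+//     <<: *defaults
+//     database: dev_db
+//
+// When d.merge sees a sequence of maps it walks it backwards so that maps
+// listed earlier take precedence over later ones.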
diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 00000000..6ecdcb3c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1684 @@
+package yaml
+
+import (
+ "bytes"
+)
+
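+// The helpers below keep at least five free bytes in the output buffer
+// before writing, presumably the longest UTF-8 sequence (4 bytes) plus a
+// break byte; that is why every check compares buffer_pos+5 to the length.
+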
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ emitter.column = 0
+ emitter.line++
+ return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
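+
+// Note that buffer_pos advances by the byte width of the rune while column
+// advances by one: column counts characters, not bytes.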
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ emitter.column = 0
+ emitter.line++
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
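+
+// The accumulated lookahead lets checks such as
+// yaml_emitter_check_empty_sequence and yaml_emitter_check_simple_key below
+// peek at upcoming events before a style decision is committed.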
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+ return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
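+
+// The 128-byte cap mirrors libyaml's heuristic for simple keys; longer keys
+// fall back to the explicit "? key" form in the emit functions above.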
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
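+
+// For example, a multiline scalar used as a simple key or any scalar under a
+// canonical emitter becomes double-quoted, while an empty string in flow or
+// simple-key context is forced into single quotes.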
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
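+
+// A directive that passes these checks looks like
+// "%TAG !e! tag:example.com,2000:"; the handle is bracketed by '!' and the
+// prefix is non-empty.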
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
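+
+// With a "%TAG !e! tag:example.com,2000:" directive registered, the tag
+// "tag:example.com,2000:book" splits into handle "!e!" and suffix "book";
+// a tag matching no prefix keeps its full value as the suffix and is later
+// written verbatim as !<...> by yaml_emitter_process_tag.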
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
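+
+// For example, "hello world" leaves every style allowed, a leading or
+// trailing space rules out both plain styles, and any line break also rules
+// out the plain styles, pushing the emitter toward quoting or block scalars.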
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ emitter.indention = true
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
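+
+// The fallback branch above percent-encodes every tag byte that is neither
+// alphanumeric nor in the URI-safe set, emitting one '%XX' triple per octet.
+// As a rough, hedged equivalence (illustrative only; the emitter writes
+// through put rather than fmt):
+//
+// encoded := fmt.Sprintf("%%%02X", octet) // e.g. 0x20 -> "%20"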
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ emitter.whitespace = false
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
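+
+// A hedged illustration of the escape table above, assuming these inputs
+// reach the double-quoted writer; the outputs show the intended escapes
+// rather than verified end-to-end marshalling:
+//
+// Go "a\tb" -> YAML "a\tb" (tab uses the named escape \t)
+// U+2028 -> \L (the line separator has a named escape)
+// 0x01 -> \x01 (values <= 0xFF get two hex digits)
+// U+ABCD -> \uABCD (values <= 0xFFFF get four hex digits)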
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
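+
+// The hints written above become the indicators after '|' or '>' in the
+// output. A hedged summary of the cases handled:
+//
+// |- strip chomping: the value has no trailing line break
+// |+ keep chomping: the value ends in two or more breaks (or is one break)
+// | clip (no indicator): the value ends in exactly one break
+// |2 indent hint: written when the value starts with a space or a break,
+// using emitter.best_indent as the digit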
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := i // [Go] scan the break run from the current position, not from the start of the scalar
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go
new file mode 100644
index 00000000..84f84995
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/encode.go
@@ -0,0 +1,306 @@
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+}
+
+func newEncoder() (e *encoder) {
+ e = &encoder{}
+ e.must(yaml_emitter_initialize(&e.emitter))
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
+ e.emit()
+ e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
+ e.emit()
+ return e
+}
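+
+// newEncoder above already emits STREAM-START and DOCUMENT-START, so the
+// full event sequence for one marshalled value is, as a sketch:
+//
+// stream-start -> document-start -> (scalar | sequence | mapping events)
+// -> document-end -> stream-end
+//
+// finish below supplies the last two events; destroy releases the emitter.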
+
+func (e *encoder) finish() {
+ e.must(yaml_document_end_event_initialize(&e.event, true))
+ e.emit()
+ e.emitter.open_ended = false
+ e.must(yaml_stream_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
+ e.must(false)
+ }
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ if !in.IsValid() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ if m, ok := iface.(Marshaler); ok {
+ v, err := m.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ in = reflect.ValueOf(v)
+ } else if m, ok := iface.(encoding.TextMarshaler); ok {
+ text, err := m.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ if in.IsNil() {
+ e.nilv()
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ if in.IsNil() {
+ e.nilv()
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice:
+ if in.Type().Elem() == mapItemType {
+ e.itemsv(tag, in)
+ } else {
+ e.slicev(tag, in)
+ }
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if in.Type() == durationType {
+ e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+ } else {
+ e.intv(tag, in)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+ for _, item := range slice {
+ e.marshal("", reflect.ValueOf(item.Key))
+ e.marshal("", reflect.ValueOf(item.Value))
+ }
+ })
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = in.FieldByIndex(info.Inline)
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ f()
+ e.must(yaml_mapping_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but such values should still be marshalled
+// quoted for the time being, for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ rtag, rs := resolve("", s)
+ if rtag == yaml_BINARY_TAG {
+ if tag == "" || tag == yaml_STR_TAG {
+ tag = rtag
+ s = rs.(string)
+ } else if tag == yaml_BINARY_TAG {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ } else {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ }
+ if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else if strings.Contains(s, "\n") {
+ style = yaml_LITERAL_SCALAR_STYLE
+ } else {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // FIXME: Handle 64 bits here.
+ s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+ implicit := tag == ""
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.emit()
+}
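+
+// A hedged usage sketch of this encoder from the public API; the struct tags
+// shown (omitempty, flow) are the ones handled by structv above, and the
+// output is indicative rather than exact:
+//
+// type T struct {
+// Name string `yaml:"name"`
+// Tags []string `yaml:"tags,flow"`
+// Note string `yaml:"note,omitempty"`
+// }
+// out, err := yaml.Marshal(T{Name: "a", Tags: []string{"x", "y"}})
+// // name: a
+// // tags: [x, y]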
diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 00000000..81d05dfe
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1095 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
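+// As a worked example of the grammar, the document
+//
+// ---
+// a: [1, 2]
+//
+// is an explicit_document whose block_node is a block_mapping with a
+// flow_sequence value, producing the event stream: stream-start,
+// document-start, mapping-start, scalar "a", sequence-start, scalar "1",
+// scalar "2", sequence-end, mapping-end, document-end, stream-end.
+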
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ return &parser.tokens[parser.tokens_head]
+ }
+ return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
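+
+// A minimal, hedged sketch of how a consumer (such as this package's
+// decoder) drives the function above, pumping events until STREAM-END and
+// treating a false return as an error described by parser.problem:
+//
+// var event yaml_event_t
+// for {
+// if !yaml_parser_parse(&parser, &event) {
+// break // parser.problem / parser.problem_mark hold the details
+// }
+// if event.typ == yaml_STREAM_END_EVENT {
+// break
+// }
+// // handle the event, then loop
+// }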
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
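+
+// An indentless sequence nests block entries directly under a mapping key
+// without extra indentation, e.g.:
+//
+// key:
+// - a
+// - b
+//
+// Entries are parsed above; the SEQUENCE-END event is emitted once a KEY,
+// VALUE or BLOCK-END token terminates the run of BLOCK-ENTRY tokens.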
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
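+
+// With these defaults in place, shorthand tags resolve as, for example:
+//
+// !foo -> !foo (primary handle "!" maps to itself)
+// !!str -> tag:yaml.org,2002:str (secondary handle "!!")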
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 00000000..f4507917
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,394 @@
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return 0.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we have enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
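+
+// The checks above recognize the following leading byte patterns; anything
+// else falls through to UTF-8 without consuming any input:
+//
+// FF FE -> UTF-16LE (2 bytes consumed)
+// FE FF -> UTF-16BE (2 bytes consumed)
+// EF BB BF -> UTF-8 (3 bytes consumed)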
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
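+ //
+ // As a worked example, the two-octet sequence 0xC3 0xA9
+ // ('é', U+00E9) decodes as follows: 0xC3&0xE0 == 0xC0, so the
+ // width is 2 and the leading octet contributes 0xC3&0x1F = 0x03;
+ // the trailing octet contributes 0xA9&0x3F = 0x29, giving
+ // (0x03<<6)+0x29 = 0xE9.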
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ low, high = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However, a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
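+ //
+ // As a worked example, U+1F600 has U' = 0xF600, so its pair is
+ // W1 = 0xD800+(0xF600>>10) = 0xD83D and
+ // W2 = 0xDC00+(0xF600&0x3FF) = 0xDE00. The decoding below
+ // reverses this: 0x10000+((0xD83D&0x3FF)<<10)+(0xDE00&0x3FF)
+ // = 0x1F600.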
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 00000000..232313cc
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,208 @@
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+ {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+ {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+ {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+ {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+ {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+ {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", yaml_MERGE_TAG, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
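+// shortTag and longTag convert between the short ("!!str") and long
+// ("tag:yaml.org,2002:str") tag notations; for example,
+// shortTag("tag:yaml.org,2002:int") returns "!!int" and
+// longTag("!!int") returns "tag:yaml.org,2002:int".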
+func shortTag(tag string) string {
+ // TODO This can easily be made faster and produce less garbage.
+ if strings.HasPrefix(tag, longTagPrefix) {
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
+
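+// resolve guesses the tag of a plain scalar from its text and returns the
+// tag together with the decoded value. A few illustrative resolutions,
+// derived from the tables above and the parsing rules below:
+//
+//	resolve("", "yes")  -> yaml_BOOL_TAG,  true
+//	resolve("", "~")    -> yaml_NULL_TAG,  nil
+//	resolve("", "0x1F") -> yaml_INT_TAG,   31
+//	resolve("", "3.14") -> yaml_FLOAT_TAG, 3.14
+//	resolve("", "text") -> yaml_STR_TAG,   "text"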
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+ return
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt(plain[3:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, -int(intv)
+ } else {
+ return yaml_INT_TAG, -intv
+ }
+ }
+ }
+ // XXX Handle timestamps here.
+
+ default:
+ panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ if tag == yaml_BINARY_TAG {
+ return yaml_BINARY_TAG, in
+ }
+ if utf8.ValidString(in) {
+ return yaml_STR_TAG, in
+ }
+ return yaml_BINARY_TAG, encodeBase64(in)
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
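+// For example, a 100-byte input encodes to 136 base64 characters and is
+// emitted as a 70-character line followed by a 66-character line, each
+// terminated by '\n'.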
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 00000000..2c9d5111
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or,
+// LL(1) parser, as it is usually called).
+//
+// Actually, there are two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. They are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes an indentation decrease that ends a block
+// collection (cf. the DEDENT token in Python). However, YAML has some syntax
+// peculiarities that make detecting these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require starting a new block collection on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// STREAM-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
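+
+// Callers in this file typically apply the same inlining manually, e.g.:
+//
+//	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+//		return false
+//	}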
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
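+// The multi-byte cases below match NEL (U+0085: 0xC2 0x85), LS (U+2028:
+// 0xE2 0x80 0xA8), and PS (U+2029: 0xE2 0x80 0xA9); CR, LF, CR LF, and NEL
+// all normalize to '\n', while LS and PS are copied through unchanged.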
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // Check if we really need to fetch more tokens.
+ need_more_tokens := false
+
+ if parser.tokens_head == len(parser.tokens) {
+ // Queue is empty.
+ need_more_tokens = true
+ } else {
+ // Check if any potential simple key may occupy the head position.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+ if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+ need_more_tokens = true
+ break
+ }
+ }
+ }
+
+ // We are finished.
+ if !need_more_tokens {
+ break
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // Remove obsolete potential simple keys.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank character except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we haven't determined the token type so far, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+ // Check for a potential simple key for each flow level.
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+
+ // The specification requires that a simple key
+ //
+ // - is limited to a single line,
+ // - is shorter than 1024 characters.
+ if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ }
+ }
+ return true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
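+// A simple key is a mapping key that is not introduced by '?'. Because the
+// scanner only learns that it saw a key once the following ':' arrives,
+// candidate positions are recorded here and turned into KEY tokens later
+// (see yaml_parser_fetch_value).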
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ // A simple key is required only when it is the first token in the current
+ // line. Therefore it is always allowed. But we add a check anyway.
+ if required && !parser.simple_key_allowed {
+ panic("should not happen")
+ }
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ }
+ simple_key.mark = parser.mark
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // Increase the flow level.
+ parser.flow_level++
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+ }
+ return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
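+// For example, when the scanner meets the first "- " of a block sequence at
+// column 0 while parser.indent is still -1, it pushes -1 onto the indents
+// stack, sets the indent to 0, and queues a BLOCK-SEQUENCE-START token.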
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report it, because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
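+// For a simple key such as "foo: bar", no KEY token was emitted while "foo"
+// was being scanned; once the ':' confirms that "foo" was a key, the KEY
+// token is inserted retroactively at the queue position recorded in
+// parser.simple_keys.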
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if simple_key.possible {
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A flow scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
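+
+// An illustrative trace (added commentary): scanning the directive line
+// "%YAML 1.1" calls this function twice, once on each side of the '.':
+// the first call sets major = 1 and the second sets minor = 1. Because
+// max_number_length is 2, each component may have at most two digits, so
+// "%YAML 100.1" is rejected.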
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ /*
+ * Check if length of the anchor is greater than 0 and it is followed by
+ * a whitespace character or one of the indicators:
+ *
+ * '?', ':', ',', ']', '}', '%', '@', '`'.
+ */
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
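+
+// Worked examples for the scanner above (added commentary):
+//
+// !<tag:yaml.org,2002:str> -> handle "", suffix "tag:yaml.org,2002:str"
+// !!str -> handle "!!", suffix "str"
+// !local -> handle "!", suffix "local"
+// ! -> handle "", suffix "!" (the special case above)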
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be part of the URI.
+ // Comparing the whole string also avoids indexing s[1], which would
+ // panic when the handle is exactly "!".
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ var s []byte
+
+ // Track whether any tag text has been seen. As in the C original, the
+ // head counts towards the length, so a bare '!' head is a valid
+ // (empty-suffix) tag.
+ hasTag := len(head) > 0
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ hasTag = true
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the tag is non-empty.
+ if !hasTag {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters. w starts at 1024, an
+ // impossible UTF-8 width, to mark that the leading octet has not been
+ // seen yet.
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
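+
+// A worked decode (illustrative): the input "%C3%A9" yields the octets
+// 0xC3 and 0xA9. width(0xC3) reports a two-byte UTF-8 sequence, and
+// 0xA9&0xC0 == 0x80 passes the trailing-octet check, so the pair is
+// appended to s as the UTF-8 encoding of U+00E9 ('é').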
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
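+
+// Illustrative header decodings for the scanner above (added commentary):
+//
+// "|" literal, chomping 0 (clip), indentation auto-detected
+// "|+" literal, chomping +1 (keep trailing breaks)
+// ">-2" folded, chomping -1 (strip), increment 2
+// "|2-" literal, strip, increment 2 (indicators accepted in either order)
+//
+// An explicit indentation indicator of 0 is rejected above.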
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing up the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
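+
+// A worked escape (illustrative): for the double-quoted input "\u00E9",
+// code_length is 4 and the hex digits give value 0xE9; since
+// 0x7F < 0xE9 <= 0x7FF, the two-byte branch emits 0xC3 0xA9, the UTF-8
+// encoding of 'é'.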
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
+ if parser.flow_level > 0 &&
+ parser.buffer[parser.buffer_pos] == ':' &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found unexpected ':'")
+ return false
+ }
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for a tab character that abuses the indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violate indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
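+
+// Folding above mirrors the flow-scalar case (illustrative): a single
+// line break between plain-scalar lines becomes one space, while n+1
+// consecutive breaks become n breaks, so "a\nb" scans to "a b" and
+// "a\n\nb" scans to "a\nb".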
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go
new file mode 100644
index 00000000..5958822f
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/sorter.go
@@ -0,0 +1,104 @@
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ return bl
+ }
+ var ai, bi int
+ var an, bn int64
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
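+
+// Illustrative ordering: the keys "a2", "a10", "b1" sort as
+// a2 < a10 < b1, because equal prefixes fall through to the digit-run
+// comparison, which compares runs numerically (2 < 10) before falling
+// back to rune order.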
+
+// keyFloat returns a float64 value for v if v is a number or a bool,
+// and reports whether the conversion was possible.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go
new file mode 100644
index 00000000..190362f2
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/writerc.go
@@ -0,0 +1,89 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ // If the output encoding is UTF-8, we don't need to recode the buffer.
+ if emitter.encoding == yaml_UTF8_ENCODING {
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+ }
+
+ // Recode the buffer into the raw buffer.
+ var low, high int
+ if emitter.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ high, low = 1, 0
+ }
+
+ pos := 0
+ for pos < emitter.buffer_pos {
+ // See the "reader.c" code for more details on UTF-8 encoding. Note
+ // that we assume that the buffer contains a valid UTF-8 sequence.
+
+ // Read the next UTF-8 character.
+ octet := emitter.buffer[pos]
+
+ var w int
+ var value rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, value = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, value = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, value = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, value = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = emitter.buffer[pos+k]
+ value = (value << 6) + (rune(octet) & 0x3F)
+ }
+ pos += w
+
+ // Write the character.
+ if value < 0x10000 {
+ var b [2]byte
+ b[high] = byte(value >> 8)
+ b[low] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
+ } else {
+ // Write the character using a surrogate pair (check "reader.c").
+ var b [4]byte
+ value -= 0x10000
+ b[high] = byte(0xD8 + (value >> 18))
+ b[low] = byte((value >> 10) & 0xFF)
+ // Only the top two bits of the low surrogate's ten payload bits
+ // belong in this byte; a 0xFF mask would leak high-surrogate bits.
+ b[high+2] = byte(0xDC + ((value >> 8) & 0x03))
+ b[low+2] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
+ }
+ }
+
+ // Write the raw buffer.
+ if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ emitter.raw_buffer = emitter.raw_buffer[:0]
+ return true
+}
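+
+// Worked surrogate arithmetic (illustrative): for U+1F600, value becomes
+// 0x1F600-0x10000 = 0xF600; the high surrogate is 0xD800+(0xF600>>10) =
+// 0xD83D and the low surrogate is 0xDC00+(0xF600&0x3FF) = 0xDE00, which
+// the byte-level code above assembles in the order picked by low/high.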
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 00000000..36d6b883
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,346 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+ Key, Value interface{}
+}
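+
+// A minimal usage sketch (illustrative):
+//
+// ms := yaml.MapSlice{
+//   {Key: "b", Value: 2},
+//   {Key: "a", Value: 1},
+// }
+// out, _ := yaml.Marshal(ms) // "b: 2\na: 1\n" -- key order is kept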
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
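+
+// A sketch of a custom unmarshaler (illustrative; the Duration type is
+// hypothetical):
+//
+// type Duration struct{ time.Duration }
+//
+// func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+//   var s string
+//   if err := unmarshal(&s); err != nil {
+//     return err
+//   }
+//   v, err := time.ParseDuration(s)
+//   if err != nil {
+//     return err
+//   }
+//   d.Duration = v
+//   return nil
+// }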
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
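+
+// A matching sketch (illustrative; Duration as above). The returned value,
+// not the receiver, is what ends up in the document:
+//
+// func (d Duration) MarshalYAML() (interface{}, error) {
+//   return d.Duration.String(), nil
+// }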
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Does not apply to zero valued structs.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int "a,omitempty"
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshal("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
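+
+// For example (illustrative): decoding "a: hello\nb: 2" into
+//
+// var t struct{ A, B int }
+//
+// leaves t.B == 2 and returns a *TypeError whose Errors entry describes
+// the failed conversion of "hello" into the int field A.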
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct:
+ sinfo, err := getStructInfo(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ default:
+ return nil, errors.New("Option ,inline needs a struct value or map field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
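+
+// An illustrative mapping: given
+//
+// type T struct {
+//   Name string `yaml:"n,omitempty"`
+//   Addr string
+// }
+//
+// getStructInfo(reflect.TypeOf(T{})) records the keys "n" (with OmitEmpty
+// set) and "addr" (the lowercased field name).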
+
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
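+
+// Illustrative results (commentary):
+//
+// isZero(reflect.ValueOf("")) == true
+// isZero(reflect.ValueOf([]int{})) == true
+// isZero(reflect.ValueOf(struct{ N int }{0})) == true
+// isZero(reflect.ValueOf(struct{ N int }{1})) == false
+//
+// This is the test behind the omitempty flag recorded by getStructInfo.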
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 00000000..d60a6b6b
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,716 @@
+package yaml
+
+import (
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+ yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+)
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mappings.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for yaml_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
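+
+// Editor's sketch (not part of the vendored file): resolving a mapping pair
+// to its key and value nodes, assuming libyaml's convention that node ids
+// are 1-based indices into document.nodes and that id 0 means "no node".
+func example_pair_nodes(doc *yaml_document_t, pair yaml_node_pair_t) (key, value *yaml_node_t) {
+	if pair.key > 0 && pair.key <= len(doc.nodes) {
+		key = &doc.nodes[pair.key-1]
+	}
+	if pair.value > 0 && pair.value <= len(doc.nodes) {
+		value = &doc.nodes[pair.value-1]
+	}
+	return key, value
+}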
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs more bytes from the
+// source. The handler should write at most len(buffer) bytes into buffer
+// and return the number of bytes written.
+//
+// At the end of the input the handler should return n == 0 together with
+// io.EOF. Any other error aborts parsing.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
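+
+// Editor's sketch: a minimal string-backed read handler satisfying the
+// contract above. It drains the input/input_pos fields of yaml_parser_t
+// (defined below); the vendored package ships an equivalent handler in its
+// apic.go.
+func example_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	if parser.input_pos == len(parser.input) {
+		return 0, io.EOF
+	}
+	n = copy(buffer, parser.input[parser.input_pos:])
+	parser.input_pos += n
+	return n, nil
+}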
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+	// The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_file io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+	token_available bool // Does the tokens queue contain a token ready for dequeueing?
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
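+
+// Editor's sketch: wiring a parser to in-memory input with the read handler
+// sketched above; buffer capacities come from the constants in
+// yamlprivateh.go. The package's own yaml_parser_* helpers in apic.go are
+// the real entry points for this setup.
+func example_new_parser(data []byte) *yaml_parser_t {
+	parser := &yaml_parser_t{
+		raw_buffer: make([]byte, 0, input_raw_buffer_size),
+		buffer:     make([]byte, 0, input_buffer_size),
+	}
+	parser.input = data
+	parser.read_handler = example_string_read_handler
+	return parser
+}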
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the
+// accumulated characters to the output. The handler should write the whole
+// buffer to the output.
+//
+// On success the handler should return nil. A non-nil error aborts
+// emitting.
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
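+
+// Editor's sketch: a minimal write handler satisfying the contract above by
+// appending to the emitter's output_buffer field (defined below); the
+// vendored package ships an equivalent handler in its apic.go.
+func example_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+	return nil
+}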
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_file io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+	canonical bool // Is the output in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+	whitespace bool // Was the last character whitespace?
+	indention  bool // Was the last character an indentation character (' ', '-', '?', ':')?
+	open_ended bool // Is an explicit document end required?
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+		flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Dumper stuff
+
+	opened bool // Has the stream been opened?
+	closed bool // Has the stream been closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+		serialized bool // Has the node been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
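+
+// Editor's sketch: wiring an emitter to an in-memory output slice with the
+// write handler sketched above, mirroring the parser setup; the package's
+// yaml_emitter_* helpers in apic.go perform the real initialization.
+func example_new_emitter(out *[]byte) *yaml_emitter_t {
+	emitter := &yaml_emitter_t{
+		buffer:     make([]byte, output_buffer_size),
+		raw_buffer: make([]byte, 0, output_raw_buffer_size),
+	}
+	emitter.output_buffer = out
+	emitter.write_handler = example_string_write_handler
+	return emitter
+}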
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 00000000..8110ce3c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer: a 2-byte UTF-16
+	// unit decodes to at most 3 bytes of UTF-8, so three times the raw size
+	// always suffices.
+	input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM. The index parameter is
+// accepted for symmetry with the other predicates, but the check always
+// inspects the start of the buffer.
+func is_bom(b []byte, i int) bool {
+	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return ( // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return ( // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return ( // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+	return 0
+}
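+
+// Editor's sketch: width() enables byte-wise UTF-8 iteration, advancing one
+// encoded character at a time; a zero width flags an invalid leading byte.
+func example_count_chars(b []byte) int {
+	n := 0
+	for i := 0; i < len(b); {
+		w := width(b[i])
+		if w == 0 {
+			return -1 // invalid UTF-8 leading byte
+		}
+		i += w
+		n++
+	}
+	return n
+}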